1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor/monitor.h"
28 #include "block/block_int.h"
29 #include "block/blockjob.h"
30 #include "qemu/module.h"
31 #include "qapi/qmp/qjson.h"
32 #include "sysemu/sysemu.h"
33 #include "qemu/notify.h"
34 #include "block/coroutine.h"
35 #include "block/qapi.h"
36 #include "qmp-commands.h"
37 #include "qemu/timer.h"
38
39 #ifdef CONFIG_BSD
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #include <sys/ioctl.h>
43 #include <sys/queue.h>
44 #ifndef __DragonFly__
45 #include <sys/disk.h>
46 #endif
47 #endif
48
49 #ifdef _WIN32
50 #include <windows.h>
51 #endif
52
53 struct BdrvDirtyBitmap {
54 HBitmap *bitmap;
55 QLIST_ENTRY(BdrvDirtyBitmap) list;
56 };
57
58 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
59
60 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
61 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
62 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
63 BlockDriverCompletionFunc *cb, void *opaque);
64 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
65 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
66 BlockDriverCompletionFunc *cb, void *opaque);
67 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
68 int64_t sector_num, int nb_sectors,
69 QEMUIOVector *iov);
70 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
71 int64_t sector_num, int nb_sectors,
72 QEMUIOVector *iov);
73 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
74 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
75 BdrvRequestFlags flags);
76 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
77 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
78 BdrvRequestFlags flags);
79 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
80 int64_t sector_num,
81 QEMUIOVector *qiov,
82 int nb_sectors,
83 BdrvRequestFlags flags,
84 BlockDriverCompletionFunc *cb,
85 void *opaque,
86 bool is_write);
87 static void coroutine_fn bdrv_co_do_rw(void *opaque);
88 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
89 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
90
91 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
92 QTAILQ_HEAD_INITIALIZER(bdrv_states);
93
94 static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
95 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);
96
97 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
98 QLIST_HEAD_INITIALIZER(bdrv_drivers);
99
100 /* If non-zero, use only whitelisted block drivers */
101 static int use_bdrv_whitelist;
102
103 #ifdef _WIN32
104 static int is_windows_drive_prefix(const char *filename)
105 {
106 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
107 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
108 filename[1] == ':');
109 }
110
111 int is_windows_drive(const char *filename)
112 {
113 if (is_windows_drive_prefix(filename) &&
114 filename[2] == '\0')
115 return 1;
116 if (strstart(filename, "\\\\.\\", NULL) ||
117 strstart(filename, "//./", NULL))
118 return 1;
119 return 0;
120 }
121 #endif
122
123 /* throttling disk I/O limits */
124 void bdrv_set_io_limits(BlockDriverState *bs,
125 ThrottleConfig *cfg)
126 {
127 int i;
128
129 throttle_config(&bs->throttle_state, cfg);
130
131 for (i = 0; i < 2; i++) {
132 qemu_co_enter_next(&bs->throttled_reqs[i]);
133 }
134 }
135
136 /* this function drains all the throttled I/Os */
137 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
138 {
139 bool drained = false;
140 bool enabled = bs->io_limits_enabled;
141 int i;
142
143 bs->io_limits_enabled = false;
144
145 for (i = 0; i < 2; i++) {
146 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
147 drained = true;
148 }
149 }
150
151 bs->io_limits_enabled = enabled;
152
153 return drained;
154 }
155
156 void bdrv_io_limits_disable(BlockDriverState *bs)
157 {
158 bs->io_limits_enabled = false;
159
160 bdrv_start_throttled_reqs(bs);
161
162 throttle_destroy(&bs->throttle_state);
163 }
164
165 static void bdrv_throttle_read_timer_cb(void *opaque)
166 {
167 BlockDriverState *bs = opaque;
168 qemu_co_enter_next(&bs->throttled_reqs[0]);
169 }
170
171 static void bdrv_throttle_write_timer_cb(void *opaque)
172 {
173 BlockDriverState *bs = opaque;
174 qemu_co_enter_next(&bs->throttled_reqs[1]);
175 }
176
177 /* should be called before bdrv_set_io_limits if a limit is set */
178 void bdrv_io_limits_enable(BlockDriverState *bs)
179 {
180 assert(!bs->io_limits_enabled);
181 throttle_init(&bs->throttle_state,
182 QEMU_CLOCK_VIRTUAL,
183 bdrv_throttle_read_timer_cb,
184 bdrv_throttle_write_timer_cb,
185 bs);
186 bs->io_limits_enabled = true;
187 }
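/* A minimal usage sketch (hypothetical caller; the ThrottleConfig setup is
 * assumed to happen elsewhere): as noted above, throttling must be enabled
 * before limits are applied.
 *
 *     ThrottleConfig cfg;
 *     ...fill in cfg...
 *     bdrv_io_limits_enable(bs);
 *     bdrv_set_io_limits(bs, &cfg);
 */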
188
189 /* This function makes an I/O request wait if needed
190 *
191 * @bytes: the number of bytes of the I/O
192 * @is_write: whether the I/O is a write
193 */
194 static void bdrv_io_limits_intercept(BlockDriverState *bs,
195 unsigned int bytes,
196 bool is_write)
197 {
198 /* does this I/O have to wait? */
199 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
200
201 /* if it must wait, or any request of this type is already throttled, queue the I/O */
202 if (must_wait ||
203 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
204 qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
205 }
206
207 /* the I/O will be executed, so do the accounting */
208 throttle_account(&bs->throttle_state, is_write, bytes);
209
210
211 /* if the next request must wait -> do nothing */
212 if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
213 return;
214 }
215
216 /* else queue next request for execution */
217 qemu_co_queue_next(&bs->throttled_reqs[is_write]);
218 }
219
220 size_t bdrv_opt_mem_align(BlockDriverState *bs)
221 {
222 if (!bs || !bs->drv) {
223 /* 4k should be on the safe side */
224 return 4096;
225 }
226
227 return bs->bl.opt_mem_alignment;
228 }
229
230 /* check if the path starts with "<protocol>:" */
231 static int path_has_protocol(const char *path)
232 {
233 const char *p;
234
235 #ifdef _WIN32
236 if (is_windows_drive(path) ||
237 is_windows_drive_prefix(path)) {
238 return 0;
239 }
240 p = path + strcspn(path, ":/\\");
241 #else
242 p = path + strcspn(path, ":/");
243 #endif
244
245 return *p == ':';
246 }
247
248 int path_is_absolute(const char *path)
249 {
250 #ifdef _WIN32
251 /* specific case for names like: "\\.\d:" */
252 if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
253 return 1;
254 }
255 return (*path == '/' || *path == '\\');
256 #else
257 return (*path == '/');
258 #endif
259 }
260
261 /* if filename is absolute, just copy it to dest. Otherwise, build a
262 path to it by treating it as relative to base_path. URLs are
263 supported. */
264 void path_combine(char *dest, int dest_size,
265 const char *base_path,
266 const char *filename)
267 {
268 const char *p, *p1;
269 int len;
270
271 if (dest_size <= 0)
272 return;
273 if (path_is_absolute(filename)) {
274 pstrcpy(dest, dest_size, filename);
275 } else {
276 p = strchr(base_path, ':');
277 if (p)
278 p++;
279 else
280 p = base_path;
281 p1 = strrchr(base_path, '/');
282 #ifdef _WIN32
283 {
284 const char *p2;
285 p2 = strrchr(base_path, '\\');
286 if (!p1 || p2 > p1)
287 p1 = p2;
288 }
289 #endif
290 if (p1)
291 p1++;
292 else
293 p1 = base_path;
294 if (p1 > p)
295 p = p1;
296 len = p - base_path;
297 if (len > dest_size - 1)
298 len = dest_size - 1;
299 memcpy(dest, base_path, len);
300 dest[len] = '\0';
301 pstrcat(dest, dest_size, filename);
302 }
303 }
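/* Illustrative example (values hypothetical): a relative backing file name
 * is resolved against the directory of the base image.
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "snap.qcow2");
 *     => dest is "/images/snap.qcow2"
 *
 * An absolute filename is copied unchanged, and a "proto:" prefix in
 * base_path is handled as described above.
 */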
304
305 void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
306 {
307 if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
308 pstrcpy(dest, sz, bs->backing_file);
309 } else {
310 path_combine(dest, sz, bs->filename, bs->backing_file);
311 }
312 }
313
314 void bdrv_register(BlockDriver *bdrv)
315 {
316 /* Block drivers without coroutine functions need emulation */
317 if (!bdrv->bdrv_co_readv) {
318 bdrv->bdrv_co_readv = bdrv_co_readv_em;
319 bdrv->bdrv_co_writev = bdrv_co_writev_em;
320
321 /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
322 * the block driver lacks aio we need to emulate that too.
323 */
324 if (!bdrv->bdrv_aio_readv) {
325 /* add AIO emulation layer */
326 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
327 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
328 }
329 }
330
331 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
332 }
333
334 /* create a new block device (by default it is empty) */
335 BlockDriverState *bdrv_new(const char *device_name, Error **errp)
336 {
337 BlockDriverState *bs;
338 int i;
339
340 if (bdrv_find(device_name)) {
341 error_setg(errp, "Device with id '%s' already exists",
342 device_name);
343 return NULL;
344 }
345 if (bdrv_find_node(device_name)) {
346 error_setg(errp, "Device with node-name '%s' already exists",
347 device_name);
348 return NULL;
349 }
350
351 bs = g_malloc0(sizeof(BlockDriverState));
352 QLIST_INIT(&bs->dirty_bitmaps);
353 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
354 if (device_name[0] != '\0') {
355 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
356 }
357 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
358 QLIST_INIT(&bs->op_blockers[i]);
359 }
360 bdrv_iostatus_disable(bs);
361 notifier_list_init(&bs->close_notifiers);
362 notifier_with_return_list_init(&bs->before_write_notifiers);
363 qemu_co_queue_init(&bs->throttled_reqs[0]);
364 qemu_co_queue_init(&bs->throttled_reqs[1]);
365 bs->refcnt = 1;
366
367 return bs;
368 }
369
370 void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
371 {
372 notifier_list_add(&bs->close_notifiers, notify);
373 }
374
375 BlockDriver *bdrv_find_format(const char *format_name)
376 {
377 BlockDriver *drv1;
378 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
379 if (!strcmp(drv1->format_name, format_name)) {
380 return drv1;
381 }
382 }
383 return NULL;
384 }
385
386 static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
387 {
388 static const char *whitelist_rw[] = {
389 CONFIG_BDRV_RW_WHITELIST
390 };
391 static const char *whitelist_ro[] = {
392 CONFIG_BDRV_RO_WHITELIST
393 };
394 const char **p;
395
396 if (!whitelist_rw[0] && !whitelist_ro[0]) {
397 return 1; /* no whitelist, anything goes */
398 }
399
400 for (p = whitelist_rw; *p; p++) {
401 if (!strcmp(drv->format_name, *p)) {
402 return 1;
403 }
404 }
405 if (read_only) {
406 for (p = whitelist_ro; *p; p++) {
407 if (!strcmp(drv->format_name, *p)) {
408 return 1;
409 }
410 }
411 }
412 return 0;
413 }
414
415 BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
416 bool read_only)
417 {
418 BlockDriver *drv = bdrv_find_format(format_name);
419 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
420 }
421
422 typedef struct CreateCo {
423 BlockDriver *drv;
424 char *filename;
425 QEMUOptionParameter *options;
426 int ret;
427 Error *err;
428 } CreateCo;
429
430 static void coroutine_fn bdrv_create_co_entry(void *opaque)
431 {
432 Error *local_err = NULL;
433 int ret;
434
435 CreateCo *cco = opaque;
436 assert(cco->drv);
437
438 ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
439 if (local_err) {
440 error_propagate(&cco->err, local_err);
441 }
442 cco->ret = ret;
443 }
444
445 int bdrv_create(BlockDriver *drv, const char* filename,
446 QEMUOptionParameter *options, Error **errp)
447 {
448 int ret;
449
450 Coroutine *co;
451 CreateCo cco = {
452 .drv = drv,
453 .filename = g_strdup(filename),
454 .options = options,
455 .ret = NOT_DONE,
456 .err = NULL,
457 };
458
459 if (!drv->bdrv_create) {
460 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
461 ret = -ENOTSUP;
462 goto out;
463 }
464
465 if (qemu_in_coroutine()) {
466 /* Fast-path if already in coroutine context */
467 bdrv_create_co_entry(&cco);
468 } else {
469 co = qemu_coroutine_create(bdrv_create_co_entry);
470 qemu_coroutine_enter(co, &cco);
471 while (cco.ret == NOT_DONE) {
472 qemu_aio_wait();
473 }
474 }
475
476 ret = cco.ret;
477 if (ret < 0) {
478 if (cco.err) {
479 error_propagate(errp, cco.err);
480 } else {
481 error_setg_errno(errp, -ret, "Could not create image");
482 }
483 }
484
485 out:
486 g_free(cco.filename);
487 return ret;
488 }
489
490 int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
491 Error **errp)
492 {
493 BlockDriver *drv;
494 Error *local_err = NULL;
495 int ret;
496
497 drv = bdrv_find_protocol(filename, true);
498 if (drv == NULL) {
499 error_setg(errp, "Could not find protocol for file '%s'", filename);
500 return -ENOENT;
501 }
502
503 ret = bdrv_create(drv, filename, options, &local_err);
504 if (local_err) {
505 error_propagate(errp, local_err);
506 }
507 return ret;
508 }
509
510 int bdrv_refresh_limits(BlockDriverState *bs)
511 {
512 BlockDriver *drv = bs->drv;
513
514 memset(&bs->bl, 0, sizeof(bs->bl));
515
516 if (!drv) {
517 return 0;
518 }
519
520 /* Take some limits from the children as a default */
521 if (bs->file) {
522 bdrv_refresh_limits(bs->file);
523 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
524 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
525 } else {
526 bs->bl.opt_mem_alignment = 512;
527 }
528
529 if (bs->backing_hd) {
530 bdrv_refresh_limits(bs->backing_hd);
531 bs->bl.opt_transfer_length =
532 MAX(bs->bl.opt_transfer_length,
533 bs->backing_hd->bl.opt_transfer_length);
534 bs->bl.opt_mem_alignment =
535 MAX(bs->bl.opt_mem_alignment,
536 bs->backing_hd->bl.opt_mem_alignment);
537 }
538
539 /* Then let the driver override it */
540 if (drv->bdrv_refresh_limits) {
541 return drv->bdrv_refresh_limits(bs);
542 }
543
544 return 0;
545 }
546
547 /*
548 * Create a uniquely-named empty temporary file.
549 * Return 0 upon success, otherwise a negative errno value.
550 */
551 int get_tmp_filename(char *filename, int size)
552 {
553 #ifdef _WIN32
554 char temp_dir[MAX_PATH];
555 /* GetTempFileName requires that its output buffer (4th param)
556 have length MAX_PATH or greater. */
557 assert(size >= MAX_PATH);
558 return (GetTempPath(MAX_PATH, temp_dir)
559 && GetTempFileName(temp_dir, "qem", 0, filename)
560 ? 0 : -GetLastError());
561 #else
562 int fd;
563 const char *tmpdir;
564 tmpdir = getenv("TMPDIR");
565 if (!tmpdir) {
566 tmpdir = "/var/tmp";
567 }
568 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
569 return -EOVERFLOW;
570 }
571 fd = mkstemp(filename);
572 if (fd < 0) {
573 return -errno;
574 }
575 if (close(fd) != 0) {
576 unlink(filename);
577 return -errno;
578 }
579 return 0;
580 #endif
581 }
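/* A usage sketch (hypothetical caller): the buffer receives the generated
 * name, something like "/var/tmp/vl.Ab12Cd" on POSIX hosts.
 *
 *     char tmp_name[PATH_MAX];
 *     int ret = get_tmp_filename(tmp_name, sizeof(tmp_name));
 *     if (ret < 0) {
 *         ...report -ret as an errno value...
 *     }
 */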
582
583 /*
584 * Detect host devices. By convention, /dev/cdrom[N] is always
585 * recognized as a host CDROM.
586 */
587 static BlockDriver *find_hdev_driver(const char *filename)
588 {
589 int score_max = 0, score;
590 BlockDriver *drv = NULL, *d;
591
592 QLIST_FOREACH(d, &bdrv_drivers, list) {
593 if (d->bdrv_probe_device) {
594 score = d->bdrv_probe_device(filename);
595 if (score > score_max) {
596 score_max = score;
597 drv = d;
598 }
599 }
600 }
601
602 return drv;
603 }
604
605 BlockDriver *bdrv_find_protocol(const char *filename,
606 bool allow_protocol_prefix)
607 {
608 BlockDriver *drv1;
609 char protocol[128];
610 int len;
611 const char *p;
612
613 /* TODO Drivers without bdrv_file_open must be specified explicitly */
614
615 /*
616 * XXX(hch): we really should not let host device detection
617 * override an explicit protocol specification, but moving this
618 * later breaks access to device names with colons in them.
619 * Thanks to the brain-dead persistent naming schemes on udev-
620 * based Linux systems those actually are quite common.
621 */
622 drv1 = find_hdev_driver(filename);
623 if (drv1) {
624 return drv1;
625 }
626
627 if (!path_has_protocol(filename) || !allow_protocol_prefix) {
628 return bdrv_find_format("file");
629 }
630
631 p = strchr(filename, ':');
632 assert(p != NULL);
633 len = p - filename;
634 if (len > sizeof(protocol) - 1)
635 len = sizeof(protocol) - 1;
636 memcpy(protocol, filename, len);
637 protocol[len] = '\0';
638 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
639 if (drv1->protocol_name &&
640 !strcmp(drv1->protocol_name, protocol)) {
641 return drv1;
642 }
643 }
644 return NULL;
645 }
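/* Resolution examples (illustrative): "nbd:localhost:10809" matches the
 * driver whose protocol_name is "nbd"; a plain path such as
 * "/images/disk.img" has no protocol prefix and falls back to the "file"
 * driver; "c:\disk.img" on Windows is caught by the drive-prefix check in
 * path_has_protocol() and also resolves to "file".
 */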
646
647 static int find_image_format(BlockDriverState *bs, const char *filename,
648 BlockDriver **pdrv, Error **errp)
649 {
650 int score, score_max;
651 BlockDriver *drv1, *drv;
652 uint8_t buf[2048];
653 int ret = 0;
654
655 /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
656 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
657 drv = bdrv_find_format("raw");
658 if (!drv) {
659 error_setg(errp, "Could not find raw image format");
660 ret = -ENOENT;
661 }
662 *pdrv = drv;
663 return ret;
664 }
665
666 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
667 if (ret < 0) {
668 error_setg_errno(errp, -ret, "Could not read image for determining its "
669 "format");
670 *pdrv = NULL;
671 return ret;
672 }
673
674 score_max = 0;
675 drv = NULL;
676 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
677 if (drv1->bdrv_probe) {
678 score = drv1->bdrv_probe(buf, ret, filename);
679 if (score > score_max) {
680 score_max = score;
681 drv = drv1;
682 }
683 }
684 }
685 if (!drv) {
686 error_setg(errp, "Could not determine image format: No compatible "
687 "driver found");
688 ret = -ENOENT;
689 }
690 *pdrv = drv;
691 return ret;
692 }
693
694 /**
695 * Set the current 'total_sectors' value
696 */
697 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
698 {
699 BlockDriver *drv = bs->drv;
700
701 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
702 if (bs->sg)
703 return 0;
704
705 /* query actual device if possible, otherwise just trust the hint */
706 if (drv->bdrv_getlength) {
707 int64_t length = drv->bdrv_getlength(bs);
708 if (length < 0) {
709 return length;
710 }
711 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
712 }
713
714 bs->total_sectors = hint;
715 return 0;
716 }
717
718 /**
719 * Set open flags for a given discard mode
720 *
721 * Return 0 on success, -1 if the discard mode was invalid.
722 */
723 int bdrv_parse_discard_flags(const char *mode, int *flags)
724 {
725 *flags &= ~BDRV_O_UNMAP;
726
727 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
728 /* do nothing */
729 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
730 *flags |= BDRV_O_UNMAP;
731 } else {
732 return -1;
733 }
734
735 return 0;
736 }
737
738 /**
739 * Set open flags for a given cache mode
740 *
741 * Return 0 on success, -1 if the cache mode was invalid.
742 */
743 int bdrv_parse_cache_flags(const char *mode, int *flags)
744 {
745 *flags &= ~BDRV_O_CACHE_MASK;
746
747 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
748 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
749 } else if (!strcmp(mode, "directsync")) {
750 *flags |= BDRV_O_NOCACHE;
751 } else if (!strcmp(mode, "writeback")) {
752 *flags |= BDRV_O_CACHE_WB;
753 } else if (!strcmp(mode, "unsafe")) {
754 *flags |= BDRV_O_CACHE_WB;
755 *flags |= BDRV_O_NO_FLUSH;
756 } else if (!strcmp(mode, "writethrough")) {
757 /* this is the default */
758 } else {
759 return -1;
760 }
761
762 return 0;
763 }
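/* A combined usage sketch (hypothetical caller, e.g. command line parsing;
 * error handling elided):
 *
 *     int flags = 0;
 *     if (bdrv_parse_cache_flags("none", &flags) < 0) {
 *         ...invalid cache mode...
 *     }
 *     if (bdrv_parse_discard_flags("unmap", &flags) < 0) {
 *         ...invalid discard mode...
 *     }
 *     => flags now contains BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_UNMAP
 */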
764
765 /**
766 * The copy-on-read flag is actually a reference count so multiple users may
767 * use the feature without worrying about clobbering its previous state.
768 * Copy-on-read stays enabled until all users have disabled it.
769 */
770 void bdrv_enable_copy_on_read(BlockDriverState *bs)
771 {
772 bs->copy_on_read++;
773 }
774
775 void bdrv_disable_copy_on_read(BlockDriverState *bs)
776 {
777 assert(bs->copy_on_read > 0);
778 bs->copy_on_read--;
779 }
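/* Because the flag is a reference count, independent users compose safely
 * (sketch):
 *
 *     bdrv_enable_copy_on_read(bs);     user A
 *     bdrv_enable_copy_on_read(bs);     user B
 *     bdrv_disable_copy_on_read(bs);    A done, copy-on-read still active
 *     bdrv_disable_copy_on_read(bs);    B done, copy-on-read now off
 */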
780
781 /*
782 * Returns the flags that a temporary snapshot should get, based on the
783 * originally requested flags (the originally requested image will have flags
784 * like those of a backing file)
785 */
786 static int bdrv_temp_snapshot_flags(int flags)
787 {
788 return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
789 }
790
791 /*
792 * Returns the flags that bs->file should get, based on the given flags for
793 * the parent BDS
794 */
795 static int bdrv_inherited_flags(int flags)
796 {
797 /* Enable protocol handling, disable format probing for bs->file */
798 flags |= BDRV_O_PROTOCOL;
799
800 /* Our block drivers take care to send flushes and respect unmap policy,
801 * so we can enable both unconditionally on lower layers. */
802 flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
803
804 /* Clear flags that only apply to the top layer */
805 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
806
807 return flags;
808 }
809
810 /*
811 * Returns the flags that bs->backing_hd should get, based on the given flags
812 * for the parent BDS
813 */
814 static int bdrv_backing_flags(int flags)
815 {
816 /* backing files are always opened read-only */
817 flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);
818
819 /* snapshot=on is handled on the top layer */
820 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);
821
822 return flags;
823 }
824
825 static int bdrv_open_flags(BlockDriverState *bs, int flags)
826 {
827 int open_flags = flags | BDRV_O_CACHE_WB;
828
829 /*
830 * Clear flags that are internal to the block layer before opening the
831 * image.
832 */
833 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
834
835 /*
836 * Snapshots should be writable.
837 */
838 if (flags & BDRV_O_TEMPORARY) {
839 open_flags |= BDRV_O_RDWR;
840 }
841
842 return open_flags;
843 }
844
845 static void bdrv_assign_node_name(BlockDriverState *bs,
846 const char *node_name,
847 Error **errp)
848 {
849 if (!node_name) {
850 return;
851 }
852
853 /* an empty node name is invalid */
854 if (node_name[0] == '\0') {
855 error_setg(errp, "Empty node name");
856 return;
857 }
858
859 /* takes care of avoiding namespace collisions */
860 if (bdrv_find(node_name)) {
861 error_setg(errp, "node-name=%s is conflicting with a device id",
862 node_name);
863 return;
864 }
865
866 /* takes care of avoiding duplicate node names */
867 if (bdrv_find_node(node_name)) {
868 error_setg(errp, "Duplicate node name");
869 return;
870 }
871
872 /* copy node name into the bs and insert it into the graph list */
873 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
874 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
875 }
876
877 /*
878 * Common part for opening disk images and files
879 *
880 * Removes all processed options from *options.
881 */
882 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
883 QDict *options, int flags, BlockDriver *drv, Error **errp)
884 {
885 int ret, open_flags;
886 const char *filename;
887 const char *node_name = NULL;
888 Error *local_err = NULL;
889
890 assert(drv != NULL);
891 assert(bs->file == NULL);
892 assert(options != NULL && bs->options != options);
893
894 if (file != NULL) {
895 filename = file->filename;
896 } else {
897 filename = qdict_get_try_str(options, "filename");
898 }
899
900 if (drv->bdrv_needs_filename && !filename) {
901 error_setg(errp, "The '%s' block driver requires a file name",
902 drv->format_name);
903 return -EINVAL;
904 }
905
906 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
907
908 node_name = qdict_get_try_str(options, "node-name");
909 bdrv_assign_node_name(bs, node_name, &local_err);
910 if (local_err) {
911 error_propagate(errp, local_err);
912 return -EINVAL;
913 }
914 qdict_del(options, "node-name");
915
916 /* bdrv_open() was called directly with a protocol as drv. That layer is
917 * already open, so swap it into bs (file then holds the closed
918 * BlockDriverState) and return immediately. */
919 if (file != NULL && drv->bdrv_file_open) {
920 bdrv_swap(file, bs);
921 return 0;
922 }
923
924 bs->open_flags = flags;
925 bs->guest_block_size = 512;
926 bs->request_alignment = 512;
927 bs->zero_beyond_eof = true;
928 open_flags = bdrv_open_flags(bs, flags);
929 bs->read_only = !(open_flags & BDRV_O_RDWR);
930
931 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
932 error_setg(errp,
933 !bs->read_only && bdrv_is_whitelisted(drv, true)
934 ? "Driver '%s' can only be used for read-only devices"
935 : "Driver '%s' is not whitelisted",
936 drv->format_name);
937 return -ENOTSUP;
938 }
939
940 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
941 if (flags & BDRV_O_COPY_ON_READ) {
942 if (!bs->read_only) {
943 bdrv_enable_copy_on_read(bs);
944 } else {
945 error_setg(errp, "Can't use copy-on-read on read-only device");
946 return -EINVAL;
947 }
948 }
949
950 if (filename != NULL) {
951 pstrcpy(bs->filename, sizeof(bs->filename), filename);
952 } else {
953 bs->filename[0] = '\0';
954 }
955
956 bs->drv = drv;
957 bs->opaque = g_malloc0(drv->instance_size);
958
959 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
960
961 /* Open the image, either directly or using a protocol */
962 if (drv->bdrv_file_open) {
963 assert(file == NULL);
964 assert(!drv->bdrv_needs_filename || filename != NULL);
965 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
966 } else {
967 if (file == NULL) {
968 error_setg(errp, "Can't use '%s' as a block driver for the "
969 "protocol level", drv->format_name);
970 ret = -EINVAL;
971 goto free_and_fail;
972 }
973 bs->file = file;
974 ret = drv->bdrv_open(bs, options, open_flags, &local_err);
975 }
976
977 if (ret < 0) {
978 if (local_err) {
979 error_propagate(errp, local_err);
980 } else if (bs->filename[0]) {
981 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
982 } else {
983 error_setg_errno(errp, -ret, "Could not open image");
984 }
985 goto free_and_fail;
986 }
987
988 ret = refresh_total_sectors(bs, bs->total_sectors);
989 if (ret < 0) {
990 error_setg_errno(errp, -ret, "Could not refresh total sector count");
991 goto free_and_fail;
992 }
993
994 bdrv_refresh_limits(bs);
995 assert(bdrv_opt_mem_align(bs) != 0);
996 assert((bs->request_alignment != 0) || bs->sg);
997 return 0;
998
999 free_and_fail:
1000 bs->file = NULL;
1001 g_free(bs->opaque);
1002 bs->opaque = NULL;
1003 bs->drv = NULL;
1004 return ret;
1005 }
1006
1007 /*
1008 * Opens a file using a protocol (file, host_device, nbd, ...)
1009 *
1010 * options is an indirect pointer to a QDict of options to pass to the block
1011 * drivers, or pointer to NULL for an empty set of options. If this function
1012 * takes ownership of the QDict reference, it will set *options to NULL;
1013 * otherwise, it will contain unused/unrecognized options after this function
1014 * returns. Then, the caller is responsible for freeing it. If it intends to
1015 * reuse the QDict, QINCREF() should be called beforehand.
1016 */
1017 static int bdrv_file_open(BlockDriverState *bs, const char *filename,
1018 QDict **options, int flags, Error **errp)
1019 {
1020 BlockDriver *drv;
1021 const char *drvname;
1022 bool parse_filename = false;
1023 Error *local_err = NULL;
1024 int ret;
1025
1026 /* Fetch the file name from the options QDict if necessary */
1027 if (!filename) {
1028 filename = qdict_get_try_str(*options, "filename");
1029 } else if (filename && !qdict_haskey(*options, "filename")) {
1030 qdict_put(*options, "filename", qstring_from_str(filename));
1031 parse_filename = true;
1032 } else {
1033 error_setg(errp, "Can't specify 'file' and 'filename' options at the "
1034 "same time");
1035 ret = -EINVAL;
1036 goto fail;
1037 }
1038
1039 /* Find the right block driver */
1040 drvname = qdict_get_try_str(*options, "driver");
1041 if (drvname) {
1042 drv = bdrv_find_format(drvname);
1043 if (!drv) {
1044 error_setg(errp, "Unknown driver '%s'", drvname);
1045 }
1046 qdict_del(*options, "driver");
1047 } else if (filename) {
1048 drv = bdrv_find_protocol(filename, parse_filename);
1049 if (!drv) {
1050 error_setg(errp, "Unknown protocol");
1051 }
1052 } else {
1053 error_setg(errp, "Must specify either driver or file");
1054 drv = NULL;
1055 }
1056
1057 if (!drv) {
1058 /* errp has been set already */
1059 ret = -ENOENT;
1060 goto fail;
1061 }
1062
1063 /* Parse the filename and open it */
1064 if (drv->bdrv_parse_filename && parse_filename) {
1065 drv->bdrv_parse_filename(filename, *options, &local_err);
1066 if (local_err) {
1067 error_propagate(errp, local_err);
1068 ret = -EINVAL;
1069 goto fail;
1070 }
1071
1072 if (!drv->bdrv_needs_filename) {
1073 qdict_del(*options, "filename");
1074 } else {
1075 filename = qdict_get_str(*options, "filename");
1076 }
1077 }
1078
1079 if (!drv->bdrv_file_open) {
1080 ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
1081 *options = NULL;
1082 } else {
1083 ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
1084 }
1085 if (ret < 0) {
1086 error_propagate(errp, local_err);
1087 goto fail;
1088 }
1089
1090 bs->growable = 1;
1091 return 0;
1092
1093 fail:
1094 return ret;
1095 }
1096
1097 void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
1098 {
1099
1100 if (bs->backing_hd) {
1101 assert(bs->backing_blocker);
1102 bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
1103 } else if (backing_hd) {
1104 error_setg(&bs->backing_blocker,
1105 "device is used as backing hd of '%s'",
1106 bs->device_name);
1107 }
1108
1109 bs->backing_hd = backing_hd;
1110 if (!backing_hd) {
1111 error_free(bs->backing_blocker);
1112 bs->backing_blocker = NULL;
1113 goto out;
1114 }
1115 bs->open_flags &= ~BDRV_O_NO_BACKING;
1116 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
1117 pstrcpy(bs->backing_format, sizeof(bs->backing_format),
1118 backing_hd->drv ? backing_hd->drv->format_name : "");
1119
1120 bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
1121 /* Otherwise we won't be able to commit due to the check in bdrv_commit() */
1122 bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
1123 bs->backing_blocker);
1124 out:
1125 bdrv_refresh_limits(bs);
1126 }
1127
1128 /*
1129 * Opens the backing file for a BlockDriverState if not yet open
1130 *
1131 * options is a QDict of options to pass to the block drivers, or NULL for an
1132 * empty set of options. The reference to the QDict is transferred to this
1133 * function (even on failure), so if the caller intends to reuse the dictionary,
1134 * it needs to use QINCREF() before calling this function.
1135 */
1136 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
1137 {
1138 char *backing_filename = g_malloc0(PATH_MAX);
1139 int ret = 0;
1140 BlockDriver *back_drv = NULL;
1141 BlockDriverState *backing_hd;
1142 Error *local_err = NULL;
1143
1144 if (bs->backing_hd != NULL) {
1145 QDECREF(options);
1146 goto free_exit;
1147 }
1148
1149 /* NULL means an empty set of options */
1150 if (options == NULL) {
1151 options = qdict_new();
1152 }
1153
1154 bs->open_flags &= ~BDRV_O_NO_BACKING;
1155 if (qdict_haskey(options, "file.filename")) {
1156 backing_filename[0] = '\0';
1157 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
1158 QDECREF(options);
1159 goto free_exit;
1160 } else {
1161 bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
1162 }
1163
1164 backing_hd = bdrv_new("", errp);
1165
1166 if (bs->backing_format[0] != '\0') {
1167 back_drv = bdrv_find_format(bs->backing_format);
1168 }
1169
1170 assert(bs->backing_hd == NULL);
1171 ret = bdrv_open(&backing_hd,
1172 *backing_filename ? backing_filename : NULL, NULL, options,
1173 bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
1174 if (ret < 0) {
1175 bdrv_unref(backing_hd);
1176 backing_hd = NULL;
1177 bs->open_flags |= BDRV_O_NO_BACKING;
1178 error_setg(errp, "Could not open backing file: %s",
1179 error_get_pretty(local_err));
1180 error_free(local_err);
1181 goto free_exit;
1182 }
1183 bdrv_set_backing_hd(bs, backing_hd);
1184
1185 free_exit:
1186 g_free(backing_filename);
1187 return ret;
1188 }
1189
1190 /*
1191 * Opens a disk image whose options are given as BlockdevRef in another block
1192 * device's options.
1193 *
1194 * If allow_none is true, no image will be opened if filename is NULL and no
1195 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1196 *
1197 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
1198 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1199 * itself, all options starting with "${bdref_key}." are considered part of the
1200 * BlockdevRef.
1201 *
1202 * The BlockdevRef will be removed from the options QDict.
1203 *
1204 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
1205 */
1206 int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1207 QDict *options, const char *bdref_key, int flags,
1208 bool allow_none, Error **errp)
1209 {
1210 QDict *image_options;
1211 int ret;
1212 char *bdref_key_dot;
1213 const char *reference;
1214
1215 assert(pbs);
1216 assert(*pbs == NULL);
1217
1218 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1219 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1220 g_free(bdref_key_dot);
1221
1222 reference = qdict_get_try_str(options, bdref_key);
1223 if (!filename && !reference && !qdict_size(image_options)) {
1224 if (allow_none) {
1225 ret = 0;
1226 } else {
1227 error_setg(errp, "A block device must be specified for \"%s\"",
1228 bdref_key);
1229 ret = -EINVAL;
1230 }
1231 QDECREF(image_options);
1232 goto done;
1233 }
1234
1235 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
1236
1237 done:
1238 qdict_del(options, bdref_key);
1239 return ret;
1240 }
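/* Example of the flattened-QDict convention (keys illustrative), with
 * bdref_key "file": either
 *
 *     { "file": "node0" }                                   a reference
 * or
 *     { "file.driver": "file", "file.filename": "a.img" }   an inline BlockdevRef
 *
 * qdict_extract_subqdict() above moves every "file."-prefixed key into
 * image_options before bdrv_open() runs.
 */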
1241
1242 void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
1243 {
1244 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
1245 char *tmp_filename = g_malloc0(PATH_MAX + 1);
1246 int64_t total_size;
1247 BlockDriver *bdrv_qcow2;
1248 QEMUOptionParameter *create_options;
1249 QDict *snapshot_options;
1250 BlockDriverState *bs_snapshot;
1251 Error *local_err;
1252 int ret;
1253
1254 /* if snapshot, we create a temporary backing file and open it
1255 instead of opening 'filename' directly */
1256
1257 /* Get the required size from the image */
1258 total_size = bdrv_getlength(bs);
1259 if (total_size < 0) {
1260 error_setg_errno(errp, -total_size, "Could not get image size");
1261 goto out;
1262 }
1263 total_size &= BDRV_SECTOR_MASK;
1264
1265 /* Create the temporary image */
1266 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
1267 if (ret < 0) {
1268 error_setg_errno(errp, -ret, "Could not get temporary filename");
1269 goto out;
1270 }
1271
1272 bdrv_qcow2 = bdrv_find_format("qcow2");
1273 create_options = parse_option_parameters("", bdrv_qcow2->create_options,
1274 NULL);
1275
1276 set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
1277
1278 ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
1279 free_option_parameters(create_options);
1280 if (ret < 0) {
1281 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1282 "'%s': %s", tmp_filename,
1283 error_get_pretty(local_err));
1284 error_free(local_err);
1285 goto out;
1286 }
1287
1288 /* Prepare a new options QDict for the temporary file */
1289 snapshot_options = qdict_new();
1290 qdict_put(snapshot_options, "file.driver",
1291 qstring_from_str("file"));
1292 qdict_put(snapshot_options, "file.filename",
1293 qstring_from_str(tmp_filename));
1294
1295 bs_snapshot = bdrv_new("", &error_abort);
1296
1297 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
1298 flags, bdrv_qcow2, &local_err);
1299 if (ret < 0) {
1300 error_propagate(errp, local_err);
1301 goto out;
1302 }
1303
1304 bdrv_append(bs_snapshot, bs);
1305
1306 out:
1307 g_free(tmp_filename);
1308 }
1309
1310 static QDict *parse_json_filename(const char *filename, Error **errp)
1311 {
1312 QObject *options_obj;
1313 QDict *options;
1314 int ret;
1315
1316 ret = strstart(filename, "json:", &filename);
1317 assert(ret);
1318
1319 options_obj = qobject_from_json(filename);
1320 if (!options_obj) {
1321 error_setg(errp, "Could not parse the JSON options");
1322 return NULL;
1323 }
1324
1325 if (qobject_type(options_obj) != QTYPE_QDICT) {
1326 qobject_decref(options_obj);
1327 error_setg(errp, "Invalid JSON object given");
1328 return NULL;
1329 }
1330
1331 options = qobject_to_qdict(options_obj);
1332 qdict_flatten(options);
1333
1334 return options;
1335 }
1336
1337 /*
1338 * Opens a disk image (raw, qcow2, vmdk, ...)
1339 *
1340 * options is a QDict of options to pass to the block drivers, or NULL for an
1341 * empty set of options. The reference to the QDict belongs to the block layer
1342 * after the call (even on failure), so if the caller intends to reuse the
1343 * dictionary, it needs to use QINCREF() before calling bdrv_open.
1344 *
1345 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1346 * If it is not NULL, the referenced BDS will be reused.
1347 *
1348 * The reference parameter may be used to specify an existing block device which
1349 * should be opened. If specified, neither options nor a filename may be given,
1350 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
1351 */
1352 int bdrv_open(BlockDriverState **pbs, const char *filename,
1353 const char *reference, QDict *options, int flags,
1354 BlockDriver *drv, Error **errp)
1355 {
1356 int ret;
1357 BlockDriverState *file = NULL, *bs;
1358 const char *drvname;
1359 Error *local_err = NULL;
1360 int snapshot_flags = 0;
1361
1362 assert(pbs);
1363
1364 if (reference) {
1365 bool options_non_empty = options ? qdict_size(options) : false;
1366 QDECREF(options);
1367
1368 if (*pbs) {
1369 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1370 "another block device");
1371 return -EINVAL;
1372 }
1373
1374 if (filename || options_non_empty) {
1375 error_setg(errp, "Cannot reference an existing block device with "
1376 "additional options or a new filename");
1377 return -EINVAL;
1378 }
1379
1380 bs = bdrv_lookup_bs(reference, reference, errp);
1381 if (!bs) {
1382 return -ENODEV;
1383 }
1384 bdrv_ref(bs);
1385 *pbs = bs;
1386 return 0;
1387 }
1388
1389 if (*pbs) {
1390 bs = *pbs;
1391 } else {
1392 bs = bdrv_new("", &error_abort);
1393 }
1394
1395 /* NULL means an empty set of options */
1396 if (options == NULL) {
1397 options = qdict_new();
1398 }
1399
1400 if (filename && g_str_has_prefix(filename, "json:")) {
1401 QDict *json_options = parse_json_filename(filename, &local_err);
1402 if (local_err) {
1403 ret = -EINVAL;
1404 goto fail;
1405 }
1406
1407 /* Options given in the filename have lower priority than options
1408 * specified directly */
1409 qdict_join(options, json_options, false);
1410 QDECREF(json_options);
1411 filename = NULL;
1412 }
1413
1414 bs->options = options;
1415 options = qdict_clone_shallow(options);
1416
1417 if (flags & BDRV_O_PROTOCOL) {
1418 assert(!drv);
1419 ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
1420 &local_err);
1421 if (!ret) {
1422 drv = bs->drv;
1423 goto done;
1424 } else if (bs->drv) {
1425 goto close_and_fail;
1426 } else {
1427 goto fail;
1428 }
1429 }
1430
1431 /* Open image file without format layer */
1432 if (flags & BDRV_O_RDWR) {
1433 flags |= BDRV_O_ALLOW_RDWR;
1434 }
1435 if (flags & BDRV_O_SNAPSHOT) {
1436 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1437 flags = bdrv_backing_flags(flags);
1438 }
1439
1440 assert(file == NULL);
1441 ret = bdrv_open_image(&file, filename, options, "file",
1442 bdrv_inherited_flags(flags),
1443 true, &local_err);
1444 if (ret < 0) {
1445 goto fail;
1446 }
1447
1448 /* Find the right image format driver */
1449 drvname = qdict_get_try_str(options, "driver");
1450 if (drvname) {
1451 drv = bdrv_find_format(drvname);
1452 qdict_del(options, "driver");
1453 if (!drv) {
1454 error_setg(errp, "Invalid driver: '%s'", drvname);
1455 ret = -EINVAL;
1456 goto fail;
1457 }
1458 }
1459
1460 if (!drv) {
1461 if (file) {
1462 ret = find_image_format(file, filename, &drv, &local_err);
1463 } else {
1464 error_setg(errp, "Must specify either driver or file");
1465 ret = -EINVAL;
1466 goto fail;
1467 }
1468 }
1469
1470 if (!drv) {
1471 goto fail;
1472 }
1473
1474 /* Open the image */
1475 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
1476 if (ret < 0) {
1477 goto fail;
1478 }
1479
1480 if (file && (bs->file != file)) {
1481 bdrv_unref(file);
1482 file = NULL;
1483 }
1484
1485 /* If there is a backing file, use it */
1486 if ((flags & BDRV_O_NO_BACKING) == 0) {
1487 QDict *backing_options;
1488
1489 qdict_extract_subqdict(options, &backing_options, "backing.");
1490 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
1491 if (ret < 0) {
1492 goto close_and_fail;
1493 }
1494 }
1495
1496 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1497 * temporary snapshot afterwards. */
1498 if (snapshot_flags) {
1499 bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
1500 if (local_err) {
1501 error_propagate(errp, local_err);
1502 goto close_and_fail;
1503 }
1504 }
1505
1506
1507 done:
1508 /* Check if any unknown options were used */
1509 if (options && (qdict_size(options) != 0)) {
1510 const QDictEntry *entry = qdict_first(options);
1511 if (flags & BDRV_O_PROTOCOL) {
1512 error_setg(errp, "Block protocol '%s' doesn't support the option "
1513 "'%s'", drv->format_name, entry->key);
1514 } else {
1515 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1516 "support the option '%s'", drv->format_name,
1517 bs->device_name, entry->key);
1518 }
1519
1520 ret = -EINVAL;
1521 goto close_and_fail;
1522 }
1523
1524 if (!bdrv_key_required(bs)) {
1525 bdrv_dev_change_media_cb(bs, true);
1526 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1527 && !runstate_check(RUN_STATE_INMIGRATE)
1528 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1529 error_setg(errp,
1530 "Guest must be stopped for opening of encrypted image");
1531 ret = -EBUSY;
1532 goto close_and_fail;
1533 }
1534
1535 QDECREF(options);
1536 *pbs = bs;
1537 return 0;
1538
1539 fail:
1540 if (file != NULL) {
1541 bdrv_unref(file);
1542 }
1543 QDECREF(bs->options);
1544 QDECREF(options);
1545 bs->options = NULL;
1546 if (!*pbs) {
1547 /* If *pbs is NULL, a new BDS has been created in this function and
1548 needs to be freed now. Otherwise, it does not need to be closed,
1549 since it has not really been opened yet. */
1550 bdrv_unref(bs);
1551 }
1552 if (local_err) {
1553 error_propagate(errp, local_err);
1554 }
1555 return ret;
1556
1557 close_and_fail:
1558 /* See fail path, but now the BDS has to be always closed */
1559 if (*pbs) {
1560 bdrv_close(bs);
1561 } else {
1562 bdrv_unref(bs);
1563 }
1564 QDECREF(options);
1565 if (local_err) {
1566 error_propagate(errp, local_err);
1567 }
1568 return ret;
1569 }
1570
1571 typedef struct BlockReopenQueueEntry {
1572 bool prepared;
1573 BDRVReopenState state;
1574 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1575 } BlockReopenQueueEntry;
1576
1577 /*
1578 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1579 * reopen of multiple devices.
1580 *
1581 * bs_queue can either be an existing BlockReopenQueue on which QSIMPLEQ_INIT
1582 * has already been performed, or it may be NULL, in which case a new
1583 * BlockReopenQueue will be created and initialized. This newly created
1584 * BlockReopenQueue should be passed back in for subsequent calls that are
1585 * intended to be of the same atomic 'set'.
1586 *
1587 * bs is the BlockDriverState to add to the reopen queue.
1588 *
1589 * flags contains the open flags for the associated bs
1590 *
1591 * returns a pointer to bs_queue, which is either the newly allocated
1592 * bs_queue, or the existing bs_queue being used.
1593 *
1594 */
1595 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1596 BlockDriverState *bs, int flags)
1597 {
1598 assert(bs != NULL);
1599
1600 BlockReopenQueueEntry *bs_entry;
1601 if (bs_queue == NULL) {
1602 bs_queue = g_new0(BlockReopenQueue, 1);
1603 QSIMPLEQ_INIT(bs_queue);
1604 }
1605
1606 /* bdrv_open() masks this flag out */
1607 flags &= ~BDRV_O_PROTOCOL;
1608
1609 if (bs->file) {
1610 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
1611 }
1612
1613 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1614 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1615
1616 bs_entry->state.bs = bs;
1617 bs_entry->state.flags = flags;
1618
1619 return bs_queue;
1620 }
1621
1622 /*
1623 * Reopen multiple BlockDriverStates atomically & transactionally.
1624 *
1625 * The queue passed in (bs_queue) must have been built up previously
1626 * via bdrv_reopen_queue().
1627 *
1628 * Reopens all BDS specified in the queue, with the appropriate
1629 * flags. All devices are prepared for reopen, and failure of any
1630 * device will cause all device changes to be abandoned, and intermediate
1631 * data cleaned up.
1632 *
1633 * If all devices prepare successfully, then the changes are committed
1634 * to all devices.
1635 *
1636 */
1637 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1638 {
1639 int ret = -1;
1640 BlockReopenQueueEntry *bs_entry, *next;
1641 Error *local_err = NULL;
1642
1643 assert(bs_queue != NULL);
1644
1645 bdrv_drain_all();
1646
1647 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1648 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1649 error_propagate(errp, local_err);
1650 goto cleanup;
1651 }
1652 bs_entry->prepared = true;
1653 }
1654
1655 /* If we reach this point, we have success and just need to apply the
1656 * changes
1657 */
1658 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1659 bdrv_reopen_commit(&bs_entry->state);
1660 }
1661
1662 ret = 0;
1663
1664 cleanup:
1665 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1666 if (ret && bs_entry->prepared) {
1667 bdrv_reopen_abort(&bs_entry->state);
1668 }
1669 g_free(bs_entry);
1670 }
1671 g_free(bs_queue);
1672 return ret;
1673 }
1674
1675
1676 /* Reopen a single BlockDriverState with the specified flags. */
1677 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1678 {
1679 int ret = -1;
1680 Error *local_err = NULL;
1681 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1682
1683 ret = bdrv_reopen_multiple(queue, &local_err);
1684 if (local_err != NULL) {
1685 error_propagate(errp, local_err);
1686 }
1687 return ret;
1688 }
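/* A sketch of a transactional multi-device reopen (hypothetical caller;
 * flag manipulation illustrative):
 *
 *     BlockReopenQueue *queue = NULL;
 *     queue = bdrv_reopen_queue(queue, bs0, bs0->open_flags & ~BDRV_O_RDWR);
 *     queue = bdrv_reopen_queue(queue, bs1, bs1->open_flags & ~BDRV_O_RDWR);
 *     ret = bdrv_reopen_multiple(queue, errp);
 *
 * Either both devices end up read-only or, if any prepare step fails,
 * neither is changed.
 */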
1689
1690
1691 /*
1692 * Prepares a BlockDriverState for reopen. All changes are staged in the
1693 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1694 * the block driver's .bdrv_reopen_prepare() handler.
1695 *
1696 * bs is the BlockDriverState to reopen
1697 * flags are the new open flags
1698 * queue is the reopen queue
1699 *
1700 * Returns 0 on success, non-zero on error. On error errp will be set
1701 * as well.
1702 *
1703 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1704 * It is then the caller's responsibility to call bdrv_reopen_abort() or
1705 * bdrv_reopen_commit() for any other BDS that have been left in a
1706 * prepare() state.
1707 */
1708 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1709 Error **errp)
1710 {
1711 int ret = -1;
1712 Error *local_err = NULL;
1713 BlockDriver *drv;
1714
1715 assert(reopen_state != NULL);
1716 assert(reopen_state->bs->drv != NULL);
1717 drv = reopen_state->bs->drv;
1718
1719 /* if we are to stay read-only, do not allow permission change
1720 * to r/w */
1721 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1722 reopen_state->flags & BDRV_O_RDWR) {
1723 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1724 reopen_state->bs->device_name);
1725 goto error;
1726 }
1727
1728
1729 ret = bdrv_flush(reopen_state->bs);
1730 if (ret) {
1731 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1732 strerror(-ret));
1733 goto error;
1734 }
1735
1736 if (drv->bdrv_reopen_prepare) {
1737 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1738 if (ret) {
1739 if (local_err != NULL) {
1740 error_propagate(errp, local_err);
1741 } else {
1742 error_setg(errp, "failed while preparing to reopen image '%s'",
1743 reopen_state->bs->filename);
1744 }
1745 goto error;
1746 }
1747 } else {
1748 /* It is currently mandatory to have a bdrv_reopen_prepare()
1749 * handler for each supported drv. */
1750 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1751 drv->format_name, reopen_state->bs->device_name,
1752 "reopening of file");
1753 ret = -1;
1754 goto error;
1755 }
1756
1757 ret = 0;
1758
1759 error:
1760 return ret;
1761 }
1762
1763 /*
1764 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1765 * makes them final by swapping the staging BlockDriverState contents into
1766 * the active BlockDriverState contents.
1767 */
1768 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1769 {
1770 BlockDriver *drv;
1771
1772 assert(reopen_state != NULL);
1773 drv = reopen_state->bs->drv;
1774 assert(drv != NULL);
1775
1776 /* If there are any driver level actions to take */
1777 if (drv->bdrv_reopen_commit) {
1778 drv->bdrv_reopen_commit(reopen_state);
1779 }
1780
1781 /* set BDS specific flags now */
1782 reopen_state->bs->open_flags = reopen_state->flags;
1783 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1784 BDRV_O_CACHE_WB);
1785 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1786
1787 bdrv_refresh_limits(reopen_state->bs);
1788 }
1789
1790 /*
1791 * Abort the reopen, and delete and free the staged changes in
1792 * reopen_state
1793 */
1794 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1795 {
1796 BlockDriver *drv;
1797
1798 assert(reopen_state != NULL);
1799 drv = reopen_state->bs->drv;
1800 assert(drv != NULL);
1801
1802 if (drv->bdrv_reopen_abort) {
1803 drv->bdrv_reopen_abort(reopen_state);
1804 }
1805 }
1806
1807
1808 void bdrv_close(BlockDriverState *bs)
1809 {
1810 if (bs->job) {
1811 block_job_cancel_sync(bs->job);
1812 }
1813 bdrv_drain_all(); /* complete I/O */
1814 bdrv_flush(bs);
1815 bdrv_drain_all(); /* in case flush left pending I/O */
1816 notifier_list_notify(&bs->close_notifiers, bs);
1817
1818 if (bs->drv) {
1819 if (bs->backing_hd) {
1820 BlockDriverState *backing_hd = bs->backing_hd;
1821 bdrv_set_backing_hd(bs, NULL);
1822 bdrv_unref(backing_hd);
1823 }
1824 bs->drv->bdrv_close(bs);
1825 g_free(bs->opaque);
1826 bs->opaque = NULL;
1827 bs->drv = NULL;
1828 bs->copy_on_read = 0;
1829 bs->backing_file[0] = '\0';
1830 bs->backing_format[0] = '\0';
1831 bs->total_sectors = 0;
1832 bs->encrypted = 0;
1833 bs->valid_key = 0;
1834 bs->sg = 0;
1835 bs->growable = 0;
1836 bs->zero_beyond_eof = false;
1837 QDECREF(bs->options);
1838 bs->options = NULL;
1839
1840 if (bs->file != NULL) {
1841 bdrv_unref(bs->file);
1842 bs->file = NULL;
1843 }
1844 }
1845
1846 bdrv_dev_change_media_cb(bs, false);
1847
1848 /* throttling disk I/O limits */
1849 if (bs->io_limits_enabled) {
1850 bdrv_io_limits_disable(bs);
1851 }
1852 }
1853
1854 void bdrv_close_all(void)
1855 {
1856 BlockDriverState *bs;
1857
1858 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1859 AioContext *aio_context = bdrv_get_aio_context(bs);
1860
1861 aio_context_acquire(aio_context);
1862 bdrv_close(bs);
1863 aio_context_release(aio_context);
1864 }
1865 }
1866
1867 /* Check if any requests are in-flight (including throttled requests) */
1868 static bool bdrv_requests_pending(BlockDriverState *bs)
1869 {
1870 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1871 return true;
1872 }
1873 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1874 return true;
1875 }
1876 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1877 return true;
1878 }
1879 if (bs->file && bdrv_requests_pending(bs->file)) {
1880 return true;
1881 }
1882 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1883 return true;
1884 }
1885 return false;
1886 }
1887
1888 /*
1889 * Wait for pending requests to complete across all BlockDriverStates
1890 *
1891 * This function does not flush data to disk, use bdrv_flush_all() for that
1892 * after calling this function.
1893 *
1894 * Note that completion of an asynchronous I/O operation can trigger any
1895 * number of other I/O operations on other devices---for example a coroutine
1896 * can be arbitrarily complex and a constant flow of I/O can continue until the
1897 * coroutine is complete. Because of this, it is not possible to have a
1898 * function to drain a single device's I/O queue.
1899 */
1900 void bdrv_drain_all(void)
1901 {
1902 /* Always run first iteration so any pending completion BHs run */
1903 bool busy = true;
1904 BlockDriverState *bs;
1905
1906 while (busy) {
1907 busy = false;
1908
1909 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1910 AioContext *aio_context = bdrv_get_aio_context(bs);
1911 bool bs_busy;
1912
1913 aio_context_acquire(aio_context);
1914 bdrv_start_throttled_reqs(bs);
1915 bs_busy = bdrv_requests_pending(bs);
1916 bs_busy |= aio_poll(aio_context, bs_busy);
1917 aio_context_release(aio_context);
1918
1919 busy |= bs_busy;
1920 }
1921 }
1922 }
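/* The quiesce pattern used by bdrv_close() below illustrates the intended
 * pairing with flushing:
 *
 *     bdrv_drain_all();    complete in-flight I/O
 *     bdrv_flush(bs);      push data to disk
 *     bdrv_drain_all();    in case the flush left pending I/O
 */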
1923
1924 /* make a BlockDriverState anonymous by removing it from the bdrv_states
1925 * and graph_bdrv_states lists. Also, empty the device_name to prevent a
1926 * double remove */
1927 void bdrv_make_anon(BlockDriverState *bs)
1928 {
1929 if (bs->device_name[0] != '\0') {
1930 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
1931 }
1932 bs->device_name[0] = '\0';
1933 if (bs->node_name[0] != '\0') {
1934 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1935 }
1936 bs->node_name[0] = '\0';
1937 }
1938
1939 static void bdrv_rebind(BlockDriverState *bs)
1940 {
1941 if (bs->drv && bs->drv->bdrv_rebind) {
1942 bs->drv->bdrv_rebind(bs);
1943 }
1944 }
1945
1946 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1947 BlockDriverState *bs_src)
1948 {
1949 /* move some fields that need to stay attached to the device */
1950
1951 /* dev info */
1952 bs_dest->dev_ops = bs_src->dev_ops;
1953 bs_dest->dev_opaque = bs_src->dev_opaque;
1954 bs_dest->dev = bs_src->dev;
1955 bs_dest->guest_block_size = bs_src->guest_block_size;
1956 bs_dest->copy_on_read = bs_src->copy_on_read;
1957
1958 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1959
1960 /* i/o throttled req */
1961 memcpy(&bs_dest->throttle_state,
1962 &bs_src->throttle_state,
1963 sizeof(ThrottleState));
1964 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1965 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
1966 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1967
1968 /* r/w error */
1969 bs_dest->on_read_error = bs_src->on_read_error;
1970 bs_dest->on_write_error = bs_src->on_write_error;
1971
1972 /* i/o status */
1973 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1974 bs_dest->iostatus = bs_src->iostatus;
1975
1976 /* dirty bitmap */
1977 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
1978
1979 /* reference count */
1980 bs_dest->refcnt = bs_src->refcnt;
1981
1982 /* job */
1983 bs_dest->job = bs_src->job;
1984
1985 /* keep the same entry in bdrv_states */
1986 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1987 bs_src->device_name);
1988 bs_dest->device_list = bs_src->device_list;
1989 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
1990 sizeof(bs_dest->op_blockers));
1991 }
1992
1993 /*
1994 * Swap bs contents for two image chains while they are live,
1995 * while keeping required fields on the BlockDriverState that is
1996 * actually attached to a device.
1997 *
1998 * This will modify the BlockDriverState fields, and swap contents
1999 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2000 *
2001 * bs_new is required to be anonymous.
2002 *
2003 * This function does not create any image files.
2004 */
2005 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2006 {
2007 BlockDriverState tmp;
2008
2009 /* The code needs to swap the node_name, but simply swapping node_list won't
2010 * work, so first remove the nodes from the graph list, do the swap, then
2011 * insert them back if needed.
2012 */
2013 if (bs_new->node_name[0] != '\0') {
2014 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2015 }
2016 if (bs_old->node_name[0] != '\0') {
2017 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2018 }
2019
2020 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
2021 assert(bs_new->device_name[0] == '\0');
2022 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2023 assert(bs_new->job == NULL);
2024 assert(bs_new->dev == NULL);
2025 assert(bs_new->io_limits_enabled == false);
2026 assert(!throttle_have_timer(&bs_new->throttle_state));
2027
2028 tmp = *bs_new;
2029 *bs_new = *bs_old;
2030 *bs_old = tmp;
2031
2032 /* there are some fields that should not be swapped, move them back */
2033 bdrv_move_feature_fields(&tmp, bs_old);
2034 bdrv_move_feature_fields(bs_old, bs_new);
2035 bdrv_move_feature_fields(bs_new, &tmp);
2036
2037 /* bs_new shouldn't be in bdrv_states even after the swap! */
2038 assert(bs_new->device_name[0] == '\0');
2039
2040 /* Check a few fields that should remain attached to the device */
2041 assert(bs_new->dev == NULL);
2042 assert(bs_new->job == NULL);
2043 assert(bs_new->io_limits_enabled == false);
2044 assert(!throttle_have_timer(&bs_new->throttle_state));
2045
2046 /* insert the nodes back into the graph node list if needed */
2047 if (bs_new->node_name[0] != '\0') {
2048 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2049 }
2050 if (bs_old->node_name[0] != '\0') {
2051 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2052 }
2053
2054 bdrv_rebind(bs_new);
2055 bdrv_rebind(bs_old);
2056 }
2057
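/* Illustrative sketch, not part of the original file: the typical caller
 * prepares an anonymous BlockDriverState and swaps it with the one the
 * guest device holds, so the device keeps its pointer while the contents
 * change underneath it (creation of "anon_bs" is elided pseudocode):
 *
 *     BlockDriverState *anon_bs = ...;   // anonymous: no dev, no job
 *     bdrv_swap(anon_bs, bs);            // bs now has anon_bs's contents
 */
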
2058 /*
2059 * Add new bs contents at the top of an image chain while the chain is
2060 * live, while keeping required fields on the top layer.
2061 *
2062 * This will modify the BlockDriverState fields, and swap contents
2063 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2064 *
2065 * bs_new is required to be anonymous.
2066 *
2067 * This function does not create any image files.
2068 */
2069 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2070 {
2071 bdrv_swap(bs_new, bs_top);
2072
2073 /* After bdrv_swap(), bs_new holds the old contents of bs_top, so
2074 * making it bs_top's backing file links the new top to the old chain. */
2075 bdrv_set_backing_hd(bs_top, bs_new);
2076 }
2077
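/* Illustrative sketch, not part of the original file: external snapshot
 * creation is the classic bdrv_append() user. "overlay" is a hypothetical,
 * freshly opened, anonymous BDS:
 *
 *     bdrv_append(overlay, bs_top);
 *     // Guest writes now land in the overlay; the old bs_top contents
 *     // serve as its backing file.
 */
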
2078 static void bdrv_delete(BlockDriverState *bs)
2079 {
2080 assert(!bs->dev);
2081 assert(!bs->job);
2082 assert(bdrv_op_blocker_is_empty(bs));
2083 assert(!bs->refcnt);
2084 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2085
2086 bdrv_close(bs);
2087
2088 /* remove from list, if necessary */
2089 bdrv_make_anon(bs);
2090
2091 g_free(bs);
2092 }
2093
2094 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2095 /* TODO change to DeviceState *dev when all users are qdevified */
2096 {
2097 if (bs->dev) {
2098 return -EBUSY;
2099 }
2100 bs->dev = dev;
2101 bdrv_iostatus_reset(bs);
2102 return 0;
2103 }
2104
2105 /* TODO qdevified devices don't use this, remove when devices are qdevified */
2106 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
2107 {
2108 if (bdrv_attach_dev(bs, dev) < 0) {
2109 abort();
2110 }
2111 }
2112
2113 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2114 /* TODO change to DeviceState *dev when all users are qdevified */
2115 {
2116 assert(bs->dev == dev);
2117 bs->dev = NULL;
2118 bs->dev_ops = NULL;
2119 bs->dev_opaque = NULL;
2120 bs->guest_block_size = 512;
2121 }
2122
2123 /* TODO change to return DeviceState * when all users are qdevified */
2124 void *bdrv_get_attached_dev(BlockDriverState *bs)
2125 {
2126 return bs->dev;
2127 }
2128
2129 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2130 void *opaque)
2131 {
2132 bs->dev_ops = ops;
2133 bs->dev_opaque = opaque;
2134 }
2135
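/* Illustrative sketch, not part of the original file: a removable-media
 * device model wires up its callbacks ("my_*" names are hypothetical):
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb  = my_change_media_cb,
 *         .eject_request_cb = my_eject_request_cb,
 *         .is_tray_open     = my_is_tray_open,
 *     };
 *     bdrv_set_dev_ops(bs, &my_block_ops, my_device_state);
 */
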
2136 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2137 enum MonitorEvent ev,
2138 BlockErrorAction action, bool is_read)
2139 {
2140 QObject *data;
2141 const char *action_str;
2142
2143 switch (action) {
2144 case BDRV_ACTION_REPORT:
2145 action_str = "report";
2146 break;
2147 case BDRV_ACTION_IGNORE:
2148 action_str = "ignore";
2149 break;
2150 case BDRV_ACTION_STOP:
2151 action_str = "stop";
2152 break;
2153 default:
2154 abort();
2155 }
2156
2157 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2158 bdrv->device_name,
2159 action_str,
2160 is_read ? "read" : "write");
2161 monitor_protocol_event(ev, data);
2162
2163 qobject_decref(data);
2164 }
2165
2166 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
2167 {
2168 QObject *data;
2169
2170 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
2171 bdrv_get_device_name(bs), ejected);
2172 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
2173
2174 qobject_decref(data);
2175 }
2176
2177 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2178 {
2179 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2180 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2181 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2182 if (tray_was_closed) {
2183 /* tray open */
2184 bdrv_emit_qmp_eject_event(bs, true);
2185 }
2186 if (load) {
2187 /* tray close */
2188 bdrv_emit_qmp_eject_event(bs, false);
2189 }
2190 }
2191 }
2192
2193 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2194 {
2195 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2196 }
2197
2198 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2199 {
2200 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2201 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2202 }
2203 }
2204
2205 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2206 {
2207 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2208 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2209 }
2210 return false;
2211 }
2212
2213 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2214 {
2215 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2216 bs->dev_ops->resize_cb(bs->dev_opaque);
2217 }
2218 }
2219
2220 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2221 {
2222 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2223 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2224 }
2225 return false;
2226 }
2227
2228 /*
2229 * Run consistency checks on an image
2230 *
2231 * Returns 0 if the check could be completed (it doesn't mean that the image is
2232 * free of errors) or -errno when an internal error occurred. The results of the
2233 * check are stored in res.
2234 */
2235 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2236 {
2237 if (bs->drv->bdrv_check == NULL) {
2238 return -ENOTSUP;
2239 }
2240
2241 memset(res, 0, sizeof(*res));
2242 return bs->drv->bdrv_check(bs, res, fix);
2243 }
2244
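/* Illustrative sketch, not part of the original file, assuming the
 * BdrvCheckMode/BdrvCheckResult definitions in block.h: callers
 * distinguish "the check ran" from "the image is clean" via the result
 * struct, not the return value alone:
 *
 *     BdrvCheckResult result;
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS);
 *     if (ret < 0) { ... the check itself failed ... }
 *     else if (result.corruptions || result.check_errors) { ... }
 */
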
2245 #define COMMIT_BUF_SECTORS 2048
2246
2247 /* commit COW file into the raw image */
2248 int bdrv_commit(BlockDriverState *bs)
2249 {
2250 BlockDriver *drv = bs->drv;
2251 int64_t sector, total_sectors, length, backing_length;
2252 int n, ro, open_flags;
2253 int ret = 0;
2254 uint8_t *buf = NULL;
2255 char filename[PATH_MAX];
2256
2257 if (!drv)
2258 return -ENOMEDIUM;
2259
2260 if (!bs->backing_hd) {
2261 return -ENOTSUP;
2262 }
2263
2264 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2265 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2266 return -EBUSY;
2267 }
2268
2269 ro = bs->backing_hd->read_only;
2270 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2271 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2272 open_flags = bs->backing_hd->open_flags;
2273
2274 if (ro) {
2275 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2276 return -EACCES;
2277 }
2278 }
2279
2280 length = bdrv_getlength(bs);
2281 if (length < 0) {
2282 ret = length;
2283 goto ro_cleanup;
2284 }
2285
2286 backing_length = bdrv_getlength(bs->backing_hd);
2287 if (backing_length < 0) {
2288 ret = backing_length;
2289 goto ro_cleanup;
2290 }
2291
2292 /* If our top snapshot is larger than the backing file image,
2293 * grow the backing file image if possible. If not possible,
2294 * we must return an error. */
2295 if (length > backing_length) {
2296 ret = bdrv_truncate(bs->backing_hd, length);
2297 if (ret < 0) {
2298 goto ro_cleanup;
2299 }
2300 }
2301
2302 total_sectors = length >> BDRV_SECTOR_BITS;
2303 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2304
2305 for (sector = 0; sector < total_sectors; sector += n) {
2306 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2307 if (ret < 0) {
2308 goto ro_cleanup;
2309 }
2310 if (ret) {
2311 ret = bdrv_read(bs, sector, buf, n);
2312 if (ret < 0) {
2313 goto ro_cleanup;
2314 }
2315
2316 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2317 if (ret < 0) {
2318 goto ro_cleanup;
2319 }
2320 }
2321 }
2322
2323 if (drv->bdrv_make_empty) {
2324 ret = drv->bdrv_make_empty(bs);
2325 if (ret < 0) {
2326 goto ro_cleanup;
2327 }
2328 bdrv_flush(bs);
2329 }
2330
2331 /*
2332 * Make sure all data we wrote to the backing device is actually
2333 * stable on disk.
2334 */
2335 if (bs->backing_hd) {
2336 bdrv_flush(bs->backing_hd);
2337 }
2338
2339 ret = 0;
2340 ro_cleanup:
2341 g_free(buf);
2342
2343 if (ro) {
2344 /* ignoring error return here */
2345 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2346 }
2347
2348 return ret;
2349 }
2350
2351 int bdrv_commit_all(void)
2352 {
2353 BlockDriverState *bs;
2354
2355 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2356 AioContext *aio_context = bdrv_get_aio_context(bs);
2357
2358 aio_context_acquire(aio_context);
2359 if (bs->drv && bs->backing_hd) {
2360 int ret = bdrv_commit(bs);
2361 if (ret < 0) {
2362 aio_context_release(aio_context);
2363 return ret;
2364 }
2365 }
2366 aio_context_release(aio_context);
2367 }
2368 return 0;
2369 }
2370
2371 /**
2372 * Remove an active request from the tracked requests list
2373 *
2374 * This function should be called when a tracked request is completing.
2375 */
2376 static void tracked_request_end(BdrvTrackedRequest *req)
2377 {
2378 if (req->serialising) {
2379 req->bs->serialising_in_flight--;
2380 }
2381
2382 QLIST_REMOVE(req, list);
2383 qemu_co_queue_restart_all(&req->wait_queue);
2384 }
2385
2386 /**
2387 * Add an active request to the tracked requests list
2388 */
2389 static void tracked_request_begin(BdrvTrackedRequest *req,
2390 BlockDriverState *bs,
2391 int64_t offset,
2392 unsigned int bytes, bool is_write)
2393 {
2394 *req = (BdrvTrackedRequest){
2395 .bs = bs,
2396 .offset = offset,
2397 .bytes = bytes,
2398 .is_write = is_write,
2399 .co = qemu_coroutine_self(),
2400 .serialising = false,
2401 .overlap_offset = offset,
2402 .overlap_bytes = bytes,
2403 };
2404
2405 qemu_co_queue_init(&req->wait_queue);
2406
2407 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2408 }
2409
2410 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2411 {
2412 int64_t overlap_offset = req->offset & ~(align - 1);
2413 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2414 - overlap_offset;
2415
2416 if (!req->serialising) {
2417 req->bs->serialising_in_flight++;
2418 req->serialising = true;
2419 }
2420
2421 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2422 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2423 }
2424
2425 /**
2426 * Round a region to cluster boundaries
2427 */
2428 void bdrv_round_to_clusters(BlockDriverState *bs,
2429 int64_t sector_num, int nb_sectors,
2430 int64_t *cluster_sector_num,
2431 int *cluster_nb_sectors)
2432 {
2433 BlockDriverInfo bdi;
2434
2435 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2436 *cluster_sector_num = sector_num;
2437 *cluster_nb_sectors = nb_sectors;
2438 } else {
2439 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2440 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2441 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2442 nb_sectors, c);
2443 }
2444 }
2445
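/* Illustrative sketch, not part of the original file: with 64 KiB clusters
 * (128 sectors of 512 bytes), a request for sectors [130, 140) widens to
 * the cluster-aligned range [128, 256):
 *
 *     int64_t cluster_sector;
 *     int cluster_count;
 *     bdrv_round_to_clusters(bs, 130, 10, &cluster_sector, &cluster_count);
 *     // cluster_sector == 128, cluster_count == 128
 */
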
2446 static int bdrv_get_cluster_size(BlockDriverState *bs)
2447 {
2448 BlockDriverInfo bdi;
2449 int ret;
2450
2451 ret = bdrv_get_info(bs, &bdi);
2452 if (ret < 0 || bdi.cluster_size == 0) {
2453 return bs->request_alignment;
2454 } else {
2455 return bdi.cluster_size;
2456 }
2457 }
2458
2459 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2460 int64_t offset, unsigned int bytes)
2461 {
2462 /* aaaa bbbb */
2463 if (offset >= req->overlap_offset + req->overlap_bytes) {
2464 return false;
2465 }
2466 /* bbbb aaaa */
2467 if (req->overlap_offset >= offset + bytes) {
2468 return false;
2469 }
2470 return true;
2471 }
2472
2473 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2474 {
2475 BlockDriverState *bs = self->bs;
2476 BdrvTrackedRequest *req;
2477 bool retry;
2478 bool waited = false;
2479
2480 if (!bs->serialising_in_flight) {
2481 return false;
2482 }
2483
2484 do {
2485 retry = false;
2486 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2487 if (req == self || (!req->serialising && !self->serialising)) {
2488 continue;
2489 }
2490 if (tracked_request_overlaps(req, self->overlap_offset,
2491 self->overlap_bytes))
2492 {
2493 /* Hitting this means there was a reentrant request, for
2494 * example, a block driver issuing nested requests. This must
2495 * never happen since it would mean a deadlock.
2496 */
2497 assert(qemu_coroutine_self() != req->co);
2498
2499 /* If the request is already (indirectly) waiting for us, or
2500 * will wait for us as soon as it wakes up, then just go on
2501 * (instead of producing a deadlock in the former case). */
2502 if (!req->waiting_for) {
2503 self->waiting_for = req;
2504 qemu_co_queue_wait(&req->wait_queue);
2505 self->waiting_for = NULL;
2506 retry = true;
2507 waited = true;
2508 break;
2509 }
2510 }
2511 }
2512 } while (retry);
2513
2514 return waited;
2515 }
2516
2517 /*
2518 * Return values:
2519 * 0 - success
2520 * -EINVAL - backing format specified, but no file
2521 * -ENOSPC - can't update the backing file because no space is left in the
2522 * image file header
2523 * -ENOTSUP - format driver doesn't support changing the backing file
2524 */
2525 int bdrv_change_backing_file(BlockDriverState *bs,
2526 const char *backing_file, const char *backing_fmt)
2527 {
2528 BlockDriver *drv = bs->drv;
2529 int ret;
2530
2531 /* Backing file format doesn't make sense without a backing file */
2532 if (backing_fmt && !backing_file) {
2533 return -EINVAL;
2534 }
2535
2536 if (drv->bdrv_change_backing_file != NULL) {
2537 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2538 } else {
2539 ret = -ENOTSUP;
2540 }
2541
2542 if (ret == 0) {
2543 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2544 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2545 }
2546 return ret;
2547 }
2548
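/* Illustrative sketch, not part of the original file: after flattening a
 * chain, the new backing link is recorded in the image header; the file
 * name and format here are hypothetical:
 *
 *     ret = bdrv_change_backing_file(bs, "base.qcow2", "qcow2");
 *     if (ret == -ENOTSUP) { ... format cannot rewrite its header ... }
 */
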
2549 /*
2550 * Finds the image layer in the chain that has 'bs' as its backing file.
2551 *
2552 * active is the current topmost image.
2553 *
2554 * Returns NULL if bs is not found in active's image chain,
2555 * or if active == bs.
2556 */
2557 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2558 BlockDriverState *bs)
2559 {
2560 BlockDriverState *overlay = NULL;
2561 BlockDriverState *intermediate;
2562
2563 assert(active != NULL);
2564 assert(bs != NULL);
2565
2566 /* if bs is the same as active, then by definition it has no overlay
2567 */
2568 if (active == bs) {
2569 return NULL;
2570 }
2571
2572 intermediate = active;
2573 while (intermediate->backing_hd) {
2574 if (intermediate->backing_hd == bs) {
2575 overlay = intermediate;
2576 break;
2577 }
2578 intermediate = intermediate->backing_hd;
2579 }
2580
2581 return overlay;
2582 }
2583
2584 typedef struct BlkIntermediateStates {
2585 BlockDriverState *bs;
2586 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2587 } BlkIntermediateStates;
2588
2589
2590 /*
2591 * Drops images above 'base' up to and including 'top', and sets the image
2592 * above 'top' to have base as its backing file.
2593 *
2594 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2595 * information in 'bs' can be properly updated.
2596 *
2597 * E.g., this will convert the following chain:
2598 * bottom <- base <- intermediate <- top <- active
2599 *
2600 * to
2601 *
2602 * bottom <- base <- active
2603 *
2604 * It is allowed for bottom==base, in which case it converts:
2605 *
2606 * base <- intermediate <- top <- active
2607 *
2608 * to
2609 *
2610 * base <- active
2611 *
2612 * Error conditions:
2613 * if active == top, that is considered an error
2614 *
2615 */
2616 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2617 BlockDriverState *base)
2618 {
2619 BlockDriverState *intermediate;
2620 BlockDriverState *base_bs = NULL;
2621 BlockDriverState *new_top_bs = NULL;
2622 BlkIntermediateStates *intermediate_state, *next;
2623 int ret = -EIO;
2624
2625 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2626 QSIMPLEQ_INIT(&states_to_delete);
2627
2628 if (!top->drv || !base->drv) {
2629 goto exit;
2630 }
2631
2632 new_top_bs = bdrv_find_overlay(active, top);
2633
2634 if (new_top_bs == NULL) {
2635 /* we could not find the image above 'top', this is an error */
2636 goto exit;
2637 }
2638
2639 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2640 * to do, no intermediate images */
2641 if (new_top_bs->backing_hd == base) {
2642 ret = 0;
2643 goto exit;
2644 }
2645
2646 intermediate = top;
2647
2648 /* now we will go down through the list, and add each BDS we find
2649 * into our deletion queue, until we hit the 'base'
2650 */
2651 while (intermediate) {
2652 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2653 intermediate_state->bs = intermediate;
2654 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2655
2656 if (intermediate->backing_hd == base) {
2657 base_bs = intermediate->backing_hd;
2658 break;
2659 }
2660 intermediate = intermediate->backing_hd;
2661 }
2662 if (base_bs == NULL) {
2663 /* Something went wrong; we did not end at the base. Safely
2664 * unravel everything and exit with an error. */
2665 goto exit;
2666 }
2667
2668 /* success - we can delete the intermediate states, and link top->base */
2669 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2670 base_bs->drv ? base_bs->drv->format_name : "");
2671 if (ret) {
2672 goto exit;
2673 }
2674 bdrv_set_backing_hd(new_top_bs, base_bs);
2675
2676 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2677 /* so that bdrv_close() does not recursively close the chain */
2678 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2679 bdrv_unref(intermediate_state->bs);
2680 }
2681 ret = 0;
2682
2683 exit:
2684 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2685 g_free(intermediate_state);
2686 }
2687 return ret;
2688 }
2689
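/* Illustrative sketch, not part of the original file: once a commit job
 * has copied the data of sn1 and sn2 into base, the now-redundant
 * overlays in the chain base <- sn1 <- sn2 <- active are dropped with:
 *
 *     ret = bdrv_drop_intermediate(active, sn2, base);
 *     // on success the chain is base <- active and sn1/sn2 are unref'd
 */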
2690
2691 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2692 size_t size)
2693 {
2694 int64_t len;
2695
2696 if (size > INT_MAX) {
2697 return -EIO;
2698 }
2699
2700 if (!bdrv_is_inserted(bs))
2701 return -ENOMEDIUM;
2702
2703 if (bs->growable)
2704 return 0;
2705
2706 len = bdrv_getlength(bs);
2707
2708 if (offset < 0)
2709 return -EIO;
2710
2711 if ((offset > len) || (len - offset < size))
2712 return -EIO;
2713
2714 return 0;
2715 }
2716
2717 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2718 int nb_sectors)
2719 {
2720 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2721 return -EIO;
2722 }
2723
2724 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2725 nb_sectors * BDRV_SECTOR_SIZE);
2726 }
2727
2728 typedef struct RwCo {
2729 BlockDriverState *bs;
2730 int64_t offset;
2731 QEMUIOVector *qiov;
2732 bool is_write;
2733 int ret;
2734 BdrvRequestFlags flags;
2735 } RwCo;
2736
2737 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2738 {
2739 RwCo *rwco = opaque;
2740
2741 if (!rwco->is_write) {
2742 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2743 rwco->qiov->size, rwco->qiov,
2744 rwco->flags);
2745 } else {
2746 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2747 rwco->qiov->size, rwco->qiov,
2748 rwco->flags);
2749 }
2750 }
2751
2752 /*
2753 * Process a vectored synchronous request using coroutines
2754 */
2755 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2756 QEMUIOVector *qiov, bool is_write,
2757 BdrvRequestFlags flags)
2758 {
2759 Coroutine *co;
2760 RwCo rwco = {
2761 .bs = bs,
2762 .offset = offset,
2763 .qiov = qiov,
2764 .is_write = is_write,
2765 .ret = NOT_DONE,
2766 .flags = flags,
2767 };
2768
2769 /**
2770 * In a synchronous call context, while the vcpu is blocked, the throttling
2771 * timer cannot fire, so I/O throttling has to be disabled here if it has
2772 * been enabled.
2773 */
2774 if (bs->io_limits_enabled) {
2775 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2776 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2777 bdrv_io_limits_disable(bs);
2778 }
2779
2780 if (qemu_in_coroutine()) {
2781 /* Fast-path if already in coroutine context */
2782 bdrv_rw_co_entry(&rwco);
2783 } else {
2784 AioContext *aio_context = bdrv_get_aio_context(bs);
2785
2786 co = qemu_coroutine_create(bdrv_rw_co_entry);
2787 qemu_coroutine_enter(co, &rwco);
2788 while (rwco.ret == NOT_DONE) {
2789 aio_poll(aio_context, true);
2790 }
2791 }
2792 return rwco.ret;
2793 }
2794
2795 /*
2796 * Process a synchronous request using coroutines
2797 */
2798 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2799 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2800 {
2801 QEMUIOVector qiov;
2802 struct iovec iov = {
2803 .iov_base = (void *)buf,
2804 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2805 };
2806
2807 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2808 return -EINVAL;
2809 }
2810
2811 qemu_iovec_init_external(&qiov, &iov, 1);
2812 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2813 &qiov, is_write, flags);
2814 }
2815
2816 /* return < 0 if error. See bdrv_write() for the return codes */
2817 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2818 uint8_t *buf, int nb_sectors)
2819 {
2820 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2821 }
2822
2823 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2824 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2825 uint8_t *buf, int nb_sectors)
2826 {
2827 bool enabled;
2828 int ret;
2829
2830 enabled = bs->io_limits_enabled;
2831 bs->io_limits_enabled = false;
2832 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2833 bs->io_limits_enabled = enabled;
2834 return ret;
2835 }
2836
2837 /* Return < 0 if error. Important errors are:
2838 -EIO generic I/O error (may happen for all errors)
2839 -ENOMEDIUM No media inserted.
2840 -EINVAL Invalid sector number or nb_sectors
2841 -EACCES Trying to write a read-only device
2842 */
2843 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2844 const uint8_t *buf, int nb_sectors)
2845 {
2846 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2847 }
2848
2849 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2850 int nb_sectors, BdrvRequestFlags flags)
2851 {
2852 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2853 BDRV_REQ_ZERO_WRITE | flags);
2854 }
2855
2856 /*
2857 * Completely zero out a block device with the help of bdrv_write_zeroes.
2858 * The operation is sped up by checking the block status and only writing
2859 * zeroes to ranges that do not already read back as zeroes. Optional
2860 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2861 *
2862 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2863 */
2864 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2865 {
2866 int64_t target_size;
2867 int64_t ret, nb_sectors, sector_num = 0;
2868 int n;
2869
2870 target_size = bdrv_getlength(bs);
2871 if (target_size < 0) {
2872 return target_size;
2873 }
2874 target_size /= BDRV_SECTOR_SIZE;
2875
2876 for (;;) {
2877 nb_sectors = target_size - sector_num;
2878 if (nb_sectors <= 0) {
2879 return 0;
2880 }
2881 if (nb_sectors > INT_MAX) {
2882 nb_sectors = INT_MAX;
2883 }
2884 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2885 if (ret < 0) {
2886 error_report("error getting block status at sector %" PRId64 ": %s",
2887 sector_num, strerror(-ret));
2888 return ret;
2889 }
2890 if (ret & BDRV_BLOCK_ZERO) {
2891 sector_num += n;
2892 continue;
2893 }
2894 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2895 if (ret < 0) {
2896 error_report("error writing zeroes at sector %" PRId64 ": %s",
2897 sector_num, strerror(-ret));
2898 return ret;
2899 }
2900 sector_num += n;
2901 }
2902 }
2903
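/* Illustrative sketch, not part of the original file: a full-device zero
 * pass that may also discard, as an image conversion tool might issue:
 *
 *     ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 */
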
2904 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2905 {
2906 QEMUIOVector qiov;
2907 struct iovec iov = {
2908 .iov_base = (void *)buf,
2909 .iov_len = bytes,
2910 };
2911 int ret;
2912
2913 if (bytes < 0) {
2914 return -EINVAL;
2915 }
2916
2917 qemu_iovec_init_external(&qiov, &iov, 1);
2918 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2919 if (ret < 0) {
2920 return ret;
2921 }
2922
2923 return bytes;
2924 }
2925
2926 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2927 {
2928 int ret;
2929
2930 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2931 if (ret < 0) {
2932 return ret;
2933 }
2934
2935 return qiov->size;
2936 }
2937
2938 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2939 const void *buf, int bytes)
2940 {
2941 QEMUIOVector qiov;
2942 struct iovec iov = {
2943 .iov_base = (void *) buf,
2944 .iov_len = bytes,
2945 };
2946
2947 if (bytes < 0) {
2948 return -EINVAL;
2949 }
2950
2951 qemu_iovec_init_external(&qiov, &iov, 1);
2952 return bdrv_pwritev(bs, offset, &qiov);
2953 }
2954
2955 /*
2956 * Writes to the file and ensures that no writes are reordered across this
2957 * request (acts as a barrier)
2958 *
2959 * Returns 0 on success, -errno in error cases.
2960 */
2961 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2962 const void *buf, int count)
2963 {
2964 int ret;
2965
2966 ret = bdrv_pwrite(bs, offset, buf, count);
2967 if (ret < 0) {
2968 return ret;
2969 }
2970
2971 /* No flush needed for cache modes that already do it */
2972 if (bs->enable_write_cache) {
2973 bdrv_flush(bs);
2974 }
2975
2976 return 0;
2977 }
2978
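/* Illustrative sketch, not part of the original file: format drivers use
 * the barrier semantics for metadata that must be stable before dependent
 * writes, e.g. an image header update ("header" is hypothetical):
 *
 *     ret = bdrv_pwrite_sync(bs->file, 0, &header, sizeof(header));
 *     if (ret < 0) { return ret; }
 */
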
2979 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2980 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2981 {
2982 /* Perform I/O through a temporary buffer so that users who scribble over
2983 * their read buffer while the operation is in progress do not end up
2984 * modifying the image file. This is critical for zero-copy guest I/O
2985 * where anything might happen inside guest memory.
2986 */
2987 void *bounce_buffer;
2988
2989 BlockDriver *drv = bs->drv;
2990 struct iovec iov;
2991 QEMUIOVector bounce_qiov;
2992 int64_t cluster_sector_num;
2993 int cluster_nb_sectors;
2994 size_t skip_bytes;
2995 int ret;
2996
2997 /* Cover the entire cluster so no additional backing file I/O is required
2998 * when allocating a cluster in the image file.
2999 */
3000 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
3001 &cluster_sector_num, &cluster_nb_sectors);
3002
3003 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
3004 cluster_sector_num, cluster_nb_sectors);
3005
3006 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
3007 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
3008 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3009
3010 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3011 &bounce_qiov);
3012 if (ret < 0) {
3013 goto err;
3014 }
3015
3016 if (drv->bdrv_co_write_zeroes &&
3017 buffer_is_zero(bounce_buffer, iov.iov_len)) {
3018 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3019 cluster_nb_sectors, 0);
3020 } else {
3021 /* This does not change the data on the disk, so it is not necessary
3022 * to flush even in cache=writethrough mode.
3023 */
3024 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3025 &bounce_qiov);
3026 }
3027
3028 if (ret < 0) {
3029 /* It might be okay to ignore write errors for guest requests. If this
3030 * is a deliberate copy-on-read then we don't want to ignore the error.
3031 * Simply report it in all cases.
3032 */
3033 goto err;
3034 }
3035
3036 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3037 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3038 nb_sectors * BDRV_SECTOR_SIZE);
3039
3040 err:
3041 qemu_vfree(bounce_buffer);
3042 return ret;
3043 }
3044
3045 /*
3046 * Forwards an already correctly aligned request to the BlockDriver. This
3047 * handles copy on read and zeroing after EOF; any other features must be
3048 * implemented by the caller.
3049 */
3050 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3051 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3052 int64_t align, QEMUIOVector *qiov, int flags)
3053 {
3054 BlockDriver *drv = bs->drv;
3055 int ret;
3056
3057 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3058 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3059
3060 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3061 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3062
3063 /* Handle Copy on Read and associated serialisation */
3064 if (flags & BDRV_REQ_COPY_ON_READ) {
3065 /* If we touch the same cluster it counts as an overlap. This
3066 * guarantees that allocating writes will be serialized and not race
3067 * with each other for the same cluster. For example, in copy-on-read
3068 * it ensures that the CoR read and write operations are atomic and
3069 * guest writes cannot interleave between them. */
3070 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3071 }
3072
3073 wait_serialising_requests(req);
3074
3075 if (flags & BDRV_REQ_COPY_ON_READ) {
3076 int pnum;
3077
3078 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3079 if (ret < 0) {
3080 goto out;
3081 }
3082
3083 if (!ret || pnum != nb_sectors) {
3084 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3085 goto out;
3086 }
3087 }
3088
3089 /* Forward the request to the BlockDriver */
3090 if (!(bs->zero_beyond_eof && bs->growable)) {
3091 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3092 } else {
3093 /* Read zeros after EOF of growable BDSes */
3094 int64_t len, total_sectors, max_nb_sectors;
3095
3096 len = bdrv_getlength(bs);
3097 if (len < 0) {
3098 ret = len;
3099 goto out;
3100 }
3101
3102 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3103 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3104 align >> BDRV_SECTOR_BITS);
3105 if (max_nb_sectors > 0) {
3106 ret = drv->bdrv_co_readv(bs, sector_num,
3107 MIN(nb_sectors, max_nb_sectors), qiov);
3108 } else {
3109 ret = 0;
3110 }
3111
3112 /* Reading beyond end of file is supposed to produce zeroes */
3113 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3114 uint64_t offset = MAX(0, total_sectors - sector_num);
3115 uint64_t bytes = (sector_num + nb_sectors - offset) *
3116 BDRV_SECTOR_SIZE;
3117 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3118 }
3119 }
3120
3121 out:
3122 return ret;
3123 }
3124
3125 /*
3126 * Handle a read request in coroutine context
3127 */
3128 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3129 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3130 BdrvRequestFlags flags)
3131 {
3132 BlockDriver *drv = bs->drv;
3133 BdrvTrackedRequest req;
3134
3135 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3136 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3137 uint8_t *head_buf = NULL;
3138 uint8_t *tail_buf = NULL;
3139 QEMUIOVector local_qiov;
3140 bool use_local_qiov = false;
3141 int ret;
3142
3143 if (!drv) {
3144 return -ENOMEDIUM;
3145 }
3146 if (bdrv_check_byte_request(bs, offset, bytes)) {
3147 return -EIO;
3148 }
3149
3150 if (bs->copy_on_read) {
3151 flags |= BDRV_REQ_COPY_ON_READ;
3152 }
3153
3154 /* throttling disk I/O */
3155 if (bs->io_limits_enabled) {
3156 bdrv_io_limits_intercept(bs, bytes, false);
3157 }
3158
3159 /* Align read if necessary by padding qiov */
3160 if (offset & (align - 1)) {
3161 head_buf = qemu_blockalign(bs, align);
3162 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3163 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3164 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3165 use_local_qiov = true;
3166
3167 bytes += offset & (align - 1);
3168 offset = offset & ~(align - 1);
3169 }
3170
3171 if ((offset + bytes) & (align - 1)) {
3172 if (!use_local_qiov) {
3173 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3174 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3175 use_local_qiov = true;
3176 }
3177 tail_buf = qemu_blockalign(bs, align);
3178 qemu_iovec_add(&local_qiov, tail_buf,
3179 align - ((offset + bytes) & (align - 1)));
3180
3181 bytes = ROUND_UP(bytes, align);
3182 }
3183
3184 tracked_request_begin(&req, bs, offset, bytes, false);
3185 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3186 use_local_qiov ? &local_qiov : qiov,
3187 flags);
3188 tracked_request_end(&req);
3189
3190 if (use_local_qiov) {
3191 qemu_iovec_destroy(&local_qiov);
3192 qemu_vfree(head_buf);
3193 qemu_vfree(tail_buf);
3194 }
3195
3196 return ret;
3197 }
3198
3199 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3200 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3201 BdrvRequestFlags flags)
3202 {
3203 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3204 return -EINVAL;
3205 }
3206
3207 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3208 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3209 }
3210
3211 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3212 int nb_sectors, QEMUIOVector *qiov)
3213 {
3214 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3215
3216 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3217 }
3218
3219 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3220 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3221 {
3222 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3223
3224 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3225 BDRV_REQ_COPY_ON_READ);
3226 }
3227
3228 /* If no limit is specified in the BlockLimits, use a default
3229 * of 32768 512-byte sectors (16 MiB) per request.
3230 */
3231 #define MAX_WRITE_ZEROES_DEFAULT 32768
3232
3233 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3234 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3235 {
3236 BlockDriver *drv = bs->drv;
3237 QEMUIOVector qiov;
3238 struct iovec iov = {0};
3239 int ret = 0;
3240
3241 int max_write_zeroes = bs->bl.max_write_zeroes ?
3242 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3243
3244 while (nb_sectors > 0 && !ret) {
3245 int num = nb_sectors;
3246
3247 /* Align request. Block drivers can expect the "bulk" of the request
3248 * to be aligned.
3249 */
3250 if (bs->bl.write_zeroes_alignment
3251 && num > bs->bl.write_zeroes_alignment) {
3252 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3253 /* Make a small request up to the first aligned sector. */
3254 num = bs->bl.write_zeroes_alignment;
3255 num -= sector_num % bs->bl.write_zeroes_alignment;
3256 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3257 /* Shorten the request to the last aligned sector. num cannot
3258 * underflow because num > bs->bl.write_zeroes_alignment.
3259 */
3260 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3261 }
3262 }
3263
3264 /* limit request size */
3265 if (num > max_write_zeroes) {
3266 num = max_write_zeroes;
3267 }
3268
3269 ret = -ENOTSUP;
3270 /* First try the efficient write zeroes operation */
3271 if (drv->bdrv_co_write_zeroes) {
3272 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3273 }
3274
3275 if (ret == -ENOTSUP) {
3276 /* Fall back to bounce buffer if write zeroes is unsupported */
3277 iov.iov_len = num * BDRV_SECTOR_SIZE;
3278 if (iov.iov_base == NULL) {
3279 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3280 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3281 }
3282 qemu_iovec_init_external(&qiov, &iov, 1);
3283
3284 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3285
3286 /* Keep the bounce buffer around if it is big enough for all
3287 * future requests.
3288 */
3289 if (num < max_write_zeroes) {
3290 qemu_vfree(iov.iov_base);
3291 iov.iov_base = NULL;
3292 }
3293 }
3294
3295 sector_num += num;
3296 nb_sectors -= num;
3297 }
3298
3299 qemu_vfree(iov.iov_base);
3300 return ret;
3301 }
3302
3303 /*
3304 * Forwards an already correctly aligned write request to the BlockDriver.
3305 */
3306 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3307 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3308 QEMUIOVector *qiov, int flags)
3309 {
3310 BlockDriver *drv = bs->drv;
3311 bool waited;
3312 int ret;
3313
3314 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3315 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3316
3317 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3318 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3319
3320 waited = wait_serialising_requests(req);
3321 assert(!waited || !req->serialising);
3322 assert(req->overlap_offset <= offset);
3323 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3324
3325 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3326
3327 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3328 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3329 qemu_iovec_is_zero(qiov)) {
3330 flags |= BDRV_REQ_ZERO_WRITE;
3331 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3332 flags |= BDRV_REQ_MAY_UNMAP;
3333 }
3334 }
3335
3336 if (ret < 0) {
3337 /* Do nothing, write notifier decided to fail this request */
3338 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3339 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3340 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3341 } else {
3342 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3343 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3344 }
3345 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3346
3347 if (ret == 0 && !bs->enable_write_cache) {
3348 ret = bdrv_co_flush(bs);
3349 }
3350
3351 bdrv_set_dirty(bs, sector_num, nb_sectors);
3352
3353 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3354 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3355 }
3356 if (bs->growable && ret >= 0) {
3357 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3358 }
3359
3360 return ret;
3361 }
3362
3363 /*
3364 * Handle a write request in coroutine context
3365 */
3366 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3367 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3368 BdrvRequestFlags flags)
3369 {
3370 BdrvTrackedRequest req;
3371 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3372 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3373 uint8_t *head_buf = NULL;
3374 uint8_t *tail_buf = NULL;
3375 QEMUIOVector local_qiov;
3376 bool use_local_qiov = false;
3377 int ret;
3378
3379 if (!bs->drv) {
3380 return -ENOMEDIUM;
3381 }
3382 if (bs->read_only) {
3383 return -EACCES;
3384 }
3385 if (bdrv_check_byte_request(bs, offset, bytes)) {
3386 return -EIO;
3387 }
3388
3389 /* throttling disk I/O */
3390 if (bs->io_limits_enabled) {
3391 bdrv_io_limits_intercept(bs, bytes, true);
3392 }
3393
3394 /*
3395 * Align write if necessary by performing a read-modify-write cycle.
3396 * Pad qiov with the read parts and be sure to have a tracked request not
3397 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3398 */
3399 tracked_request_begin(&req, bs, offset, bytes, true);
3400
3401 if (offset & (align - 1)) {
3402 QEMUIOVector head_qiov;
3403 struct iovec head_iov;
3404
3405 mark_request_serialising(&req, align);
3406 wait_serialising_requests(&req);
3407
3408 head_buf = qemu_blockalign(bs, align);
3409 head_iov = (struct iovec) {
3410 .iov_base = head_buf,
3411 .iov_len = align,
3412 };
3413 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3414
3415 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3416 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3417 align, &head_qiov, 0);
3418 if (ret < 0) {
3419 goto fail;
3420 }
3421 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3422
3423 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3424 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3425 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3426 use_local_qiov = true;
3427
3428 bytes += offset & (align - 1);
3429 offset = offset & ~(align - 1);
3430 }
3431
3432 if ((offset + bytes) & (align - 1)) {
3433 QEMUIOVector tail_qiov;
3434 struct iovec tail_iov;
3435 size_t tail_bytes;
3436 bool waited;
3437
3438 mark_request_serialising(&req, align);
3439 waited = wait_serialising_requests(&req);
3440 assert(!waited || !use_local_qiov);
3441
3442 tail_buf = qemu_blockalign(bs, align);
3443 tail_iov = (struct iovec) {
3444 .iov_base = tail_buf,
3445 .iov_len = align,
3446 };
3447 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3448
3449 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3450 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3451 align, &tail_qiov, 0);
3452 if (ret < 0) {
3453 goto fail;
3454 }
3455 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3456
3457 if (!use_local_qiov) {
3458 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3459 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3460 use_local_qiov = true;
3461 }
3462
3463 tail_bytes = (offset + bytes) & (align - 1);
3464 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3465
3466 bytes = ROUND_UP(bytes, align);
3467 }
3468
3469 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3470 use_local_qiov ? &local_qiov : qiov,
3471 flags);
3472
3473 fail:
3474 tracked_request_end(&req);
3475
3476 if (use_local_qiov) {
3477 qemu_iovec_destroy(&local_qiov);
3478 }
3479 qemu_vfree(head_buf);
3480 qemu_vfree(tail_buf);
3481
3482 return ret;
3483 }
3484
3485 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3486 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3487 BdrvRequestFlags flags)
3488 {
3489 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3490 return -EINVAL;
3491 }
3492
3493 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3494 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3495 }
3496
3497 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3498 int nb_sectors, QEMUIOVector *qiov)
3499 {
3500 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3501
3502 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3503 }
3504
3505 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3506 int64_t sector_num, int nb_sectors,
3507 BdrvRequestFlags flags)
3508 {
3509 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3510
3511 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3512 flags &= ~BDRV_REQ_MAY_UNMAP;
3513 }
3514
3515 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3516 BDRV_REQ_ZERO_WRITE | flags);
3517 }
3518
3519 /**
3520 * Truncate file to 'offset' bytes (needed only for file protocols)
3521 */
3522 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3523 {
3524 BlockDriver *drv = bs->drv;
3525 int ret;
3526 if (!drv)
3527 return -ENOMEDIUM;
3528 if (!drv->bdrv_truncate)
3529 return -ENOTSUP;
3530 if (bs->read_only)
3531 return -EACCES;
3532 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3533 return -EBUSY;
3534 }
3535 ret = drv->bdrv_truncate(bs, offset);
3536 if (ret == 0) {
3537 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3538 bdrv_dev_resize_cb(bs);
3539 }
3540 return ret;
3541 }
3542
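/* Illustrative sketch, not part of the original file: a monitor-driven
 * resize; on success the attached device model is notified through
 * bdrv_dev_resize_cb() ("new_size" is a hypothetical byte count):
 *
 *     ret = bdrv_truncate(bs, new_size);
 */
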
3543 /**
3544 * Length of an allocated file in bytes. Sparse files are counted by actual
3545 * allocated space. Return < 0 on error or if unknown.
3546 */
3547 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3548 {
3549 BlockDriver *drv = bs->drv;
3550 if (!drv) {
3551 return -ENOMEDIUM;
3552 }
3553 if (drv->bdrv_get_allocated_file_size) {
3554 return drv->bdrv_get_allocated_file_size(bs);
3555 }
3556 if (bs->file) {
3557 return bdrv_get_allocated_file_size(bs->file);
3558 }
3559 return -ENOTSUP;
3560 }
3561
3562 /**
3563 * Length of a file in bytes. Return < 0 on error or if unknown.
3564 */
3565 int64_t bdrv_getlength(BlockDriverState *bs)
3566 {
3567 BlockDriver *drv = bs->drv;
3568 if (!drv)
3569 return -ENOMEDIUM;
3570
3571 if (drv->has_variable_length) {
3572 int ret = refresh_total_sectors(bs, bs->total_sectors);
3573 if (ret < 0) {
3574 return ret;
3575 }
3576 }
3577 return bs->total_sectors * BDRV_SECTOR_SIZE;
3578 }
3579
3580 /* Return 0 as the number of sectors if no device is present or on error */
3581 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3582 {
3583 int64_t length;
3584 length = bdrv_getlength(bs);
3585 if (length < 0)
3586 length = 0;
3587 else
3588 length = length >> BDRV_SECTOR_BITS;
3589 *nb_sectors_ptr = length;
3590 }
3591
3592 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3593 BlockdevOnError on_write_error)
3594 {
3595 bs->on_read_error = on_read_error;
3596 bs->on_write_error = on_write_error;
3597 }
3598
3599 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3600 {
3601 return is_read ? bs->on_read_error : bs->on_write_error;
3602 }
3603
3604 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3605 {
3606 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3607
3608 switch (on_err) {
3609 case BLOCKDEV_ON_ERROR_ENOSPC:
3610 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3611 case BLOCKDEV_ON_ERROR_STOP:
3612 return BDRV_ACTION_STOP;
3613 case BLOCKDEV_ON_ERROR_REPORT:
3614 return BDRV_ACTION_REPORT;
3615 case BLOCKDEV_ON_ERROR_IGNORE:
3616 return BDRV_ACTION_IGNORE;
3617 default:
3618 abort();
3619 }
3620 }
3621
3622 /* This is done by device models because, while the block layer knows
3623 * about the error, it does not know whether an operation comes from
3624 * the device or the block layer (from a job, for example).
3625 */
3626 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3627 bool is_read, int error)
3628 {
3629 assert(error >= 0);
3630 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3631 if (action == BDRV_ACTION_STOP) {
3632 vm_stop(RUN_STATE_IO_ERROR);
3633 bdrv_iostatus_set_err(bs, error);
3634 }
3635 }
3636
3637 int bdrv_is_read_only(BlockDriverState *bs)
3638 {
3639 return bs->read_only;
3640 }
3641
3642 int bdrv_is_sg(BlockDriverState *bs)
3643 {
3644 return bs->sg;
3645 }
3646
3647 int bdrv_enable_write_cache(BlockDriverState *bs)
3648 {
3649 return bs->enable_write_cache;
3650 }
3651
3652 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3653 {
3654 bs->enable_write_cache = wce;
3655
3656 /* so a reopen() will preserve wce */
3657 if (wce) {
3658 bs->open_flags |= BDRV_O_CACHE_WB;
3659 } else {
3660 bs->open_flags &= ~BDRV_O_CACHE_WB;
3661 }
3662 }
3663
3664 int bdrv_is_encrypted(BlockDriverState *bs)
3665 {
3666 if (bs->backing_hd && bs->backing_hd->encrypted)
3667 return 1;
3668 return bs->encrypted;
3669 }
3670
3671 int bdrv_key_required(BlockDriverState *bs)
3672 {
3673 BlockDriverState *backing_hd = bs->backing_hd;
3674
3675 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3676 return 1;
3677 return (bs->encrypted && !bs->valid_key);
3678 }
3679
3680 int bdrv_set_key(BlockDriverState *bs, const char *key)
3681 {
3682 int ret;
3683 if (bs->backing_hd && bs->backing_hd->encrypted) {
3684 ret = bdrv_set_key(bs->backing_hd, key);
3685 if (ret < 0)
3686 return ret;
3687 if (!bs->encrypted)
3688 return 0;
3689 }
3690 if (!bs->encrypted) {
3691 return -EINVAL;
3692 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3693 return -ENOMEDIUM;
3694 }
3695 ret = bs->drv->bdrv_set_key(bs, key);
3696 if (ret < 0) {
3697 bs->valid_key = 0;
3698 } else if (!bs->valid_key) {
3699 bs->valid_key = 1;
3700 /* call the change callback now, we skipped it on open */
3701 bdrv_dev_change_media_cb(bs, true);
3702 }
3703 return ret;
3704 }
3705
3706 const char *bdrv_get_format_name(BlockDriverState *bs)
3707 {
3708 return bs->drv ? bs->drv->format_name : NULL;
3709 }
3710
3711 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3712 void *opaque)
3713 {
3714 BlockDriver *drv;
3715 int count = 0;
3716 const char **formats = NULL;
3717
3718 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3719 if (drv->format_name) {
3720 bool found = false;
3721 int i = count;
3722 while (formats && i && !found) {
3723 found = !strcmp(formats[--i], drv->format_name);
3724 }
3725
3726 if (!found) {
3727 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3728 formats[count++] = drv->format_name;
3729 it(opaque, drv->format_name);
3730 }
3731 }
3732 }
3733 g_free(formats);
3734 }
3735
3736 /* Find the BlockDriverState with the given device name */
3737 BlockDriverState *bdrv_find(const char *name)
3738 {
3739 BlockDriverState *bs;
3740
3741 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3742 if (!strcmp(name, bs->device_name)) {
3743 return bs;
3744 }
3745 }
3746 return NULL;
3747 }
3748
3749 /* Find a node in the BDS graph by its node name */
3750 BlockDriverState *bdrv_find_node(const char *node_name)
3751 {
3752 BlockDriverState *bs;
3753
3754 assert(node_name);
3755
3756 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3757 if (!strcmp(node_name, bs->node_name)) {
3758 return bs;
3759 }
3760 }
3761 return NULL;
3762 }
3763
3764 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3765 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3766 {
3767 BlockDeviceInfoList *list, *entry;
3768 BlockDriverState *bs;
3769
3770 list = NULL;
3771 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3772 entry = g_malloc0(sizeof(*entry));
3773 entry->value = bdrv_block_device_info(bs);
3774 entry->next = list;
3775 list = entry;
3776 }
3777
3778 return list;
3779 }
3780
3781 BlockDriverState *bdrv_lookup_bs(const char *device,
3782 const char *node_name,
3783 Error **errp)
3784 {
3785 BlockDriverState *bs = NULL;
3786
3787 if (device) {
3788 bs = bdrv_find(device);
3789
3790 if (bs) {
3791 return bs;
3792 }
3793 }
3794
3795 if (node_name) {
3796 bs = bdrv_find_node(node_name);
3797
3798 if (bs) {
3799 return bs;
3800 }
3801 }
3802
3803 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3804 device ? device : "",
3805 node_name ? node_name : "");
3806 return NULL;
3807 }
3808
3809 BlockDriverState *bdrv_next(BlockDriverState *bs)
3810 {
3811 if (!bs) {
3812 return QTAILQ_FIRST(&bdrv_states);
3813 }
3814 return QTAILQ_NEXT(bs, device_list);
3815 }
3816
3817 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3818 {
3819 BlockDriverState *bs;
3820
3821 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3822 it(opaque, bs);
3823 }
3824 }
3825
3826 const char *bdrv_get_device_name(BlockDriverState *bs)
3827 {
3828 return bs->device_name;
3829 }
3830
3831 int bdrv_get_flags(BlockDriverState *bs)
3832 {
3833 return bs->open_flags;
3834 }
3835
3836 int bdrv_flush_all(void)
3837 {
3838 BlockDriverState *bs;
3839 int result = 0;
3840
3841 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3842 AioContext *aio_context = bdrv_get_aio_context(bs);
3843 int ret;
3844
3845 aio_context_acquire(aio_context);
3846 ret = bdrv_flush(bs);
3847 if (ret < 0 && !result) {
3848 result = ret;
3849 }
3850 aio_context_release(aio_context);
3851 }
3852
3853 return result;
3854 }
3855
3856 int bdrv_has_zero_init_1(BlockDriverState *bs)
3857 {
3858 return 1;
3859 }
3860
3861 int bdrv_has_zero_init(BlockDriverState *bs)
3862 {
3863 assert(bs->drv);
3864
3865 /* If bs is a copy-on-write image, it is initialized to
3866 * the contents of the base image, which may not be zeroes. */
3867 if (bs->backing_hd) {
3868 return 0;
3869 }
3870 if (bs->drv->bdrv_has_zero_init) {
3871 return bs->drv->bdrv_has_zero_init(bs);
3872 }
3873
3874 /* safe default */
3875 return 0;
3876 }
3877
3878 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3879 {
3880 BlockDriverInfo bdi;
3881
3882 if (bs->backing_hd) {
3883 return false;
3884 }
3885
3886 if (bdrv_get_info(bs, &bdi) == 0) {
3887 return bdi.unallocated_blocks_are_zero;
3888 }
3889
3890 return false;
3891 }
3892
3893 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3894 {
3895 BlockDriverInfo bdi;
3896
3897 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3898 return false;
3899 }
3900
3901 if (bdrv_get_info(bs, &bdi) == 0) {
3902 return bdi.can_write_zeroes_with_unmap;
3903 }
3904
3905 return false;
3906 }
3907
3908 typedef struct BdrvCoGetBlockStatusData {
3909 BlockDriverState *bs;
3910 BlockDriverState *base;
3911 int64_t sector_num;
3912 int nb_sectors;
3913 int *pnum;
3914 int64_t ret;
3915 bool done;
3916 } BdrvCoGetBlockStatusData;
3917
3918 /*
3919 * Returns true iff the specified sector is present in the disk image. Drivers
3920 * not implementing the functionality are assumed to not support backing files,
3921 * hence all their sectors are reported as allocated.
3922 *
3923 * If 'sector_num' is beyond the end of the disk image the return value is 0
3924 * and 'pnum' is set to 0.
3925 *
3926 * 'pnum' is set to the number of sectors (including and immediately following
3927 * the specified sector) that are known to be in the same
3928 * allocated/unallocated state.
3929 *
3930 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3931 * beyond the end of the disk image it will be clamped.
3932 */
3933 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3934 int64_t sector_num,
3935 int nb_sectors, int *pnum)
3936 {
3937 int64_t length;
3938 int64_t n;
3939 int64_t ret, ret2;
3940
3941 length = bdrv_getlength(bs);
3942 if (length < 0) {
3943 return length;
3944 }
3945
3946 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3947 *pnum = 0;
3948 return 0;
3949 }
3950
3951 n = bs->total_sectors - sector_num;
3952 if (n < nb_sectors) {
3953 nb_sectors = n;
3954 }
3955
3956 if (!bs->drv->bdrv_co_get_block_status) {
3957 *pnum = nb_sectors;
3958 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3959 if (bs->drv->protocol_name) {
3960 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3961 }
3962 return ret;
3963 }
3964
3965 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3966 if (ret < 0) {
3967 *pnum = 0;
3968 return ret;
3969 }
3970
3971 if (ret & BDRV_BLOCK_RAW) {
3972 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3973 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3974 *pnum, pnum);
3975 }
3976
3977 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3978 ret |= BDRV_BLOCK_ALLOCATED;
3979 }
3980
3981 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3982 if (bdrv_unallocated_blocks_are_zero(bs)) {
3983 ret |= BDRV_BLOCK_ZERO;
3984 } else if (bs->backing_hd) {
3985 BlockDriverState *bs2 = bs->backing_hd;
3986 int64_t length2 = bdrv_getlength(bs2);
3987 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3988 ret |= BDRV_BLOCK_ZERO;
3989 }
3990 }
3991 }
3992
3993 if (bs->file &&
3994 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3995 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3996 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3997 *pnum, pnum);
3998 if (ret2 >= 0) {
3999 /* Ignore errors. This is just providing extra information, it
4000 * is useful but not necessary.
4001 */
4002 ret |= (ret2 & BDRV_BLOCK_ZERO);
4003 }
4004 }
4005
4006 return ret;
4007 }
4008
4009 /* Coroutine wrapper for bdrv_get_block_status() */
4010 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4011 {
4012 BdrvCoGetBlockStatusData *data = opaque;
4013 BlockDriverState *bs = data->bs;
4014
4015 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4016 data->pnum);
4017 data->done = true;
4018 }
4019
4020 /*
4021 * Synchronous wrapper around bdrv_co_get_block_status().
4022 *
4023 * See bdrv_co_get_block_status() for details.
4024 */
4025 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4026 int nb_sectors, int *pnum)
4027 {
4028 Coroutine *co;
4029 BdrvCoGetBlockStatusData data = {
4030 .bs = bs,
4031 .sector_num = sector_num,
4032 .nb_sectors = nb_sectors,
4033 .pnum = pnum,
4034 .done = false,
4035 };
4036
4037 if (qemu_in_coroutine()) {
4038 /* Fast-path if already in coroutine context */
4039 bdrv_get_block_status_co_entry(&data);
4040 } else {
4041 AioContext *aio_context = bdrv_get_aio_context(bs);
4042
4043 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4044 qemu_coroutine_enter(co, &data);
4045 while (!data.done) {
4046 aio_poll(aio_context, true);
4047 }
4048 }
4049 return data.ret;
4050 }
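
/* Illustrative sketch (editor's addition, not part of the original file): how
 * a caller might walk an image's allocation map with the synchronous wrapper
 * above. The function name and the 1024-sector chunk size are invented for
 * the example; a negative result from bdrv_getlength() simply keeps the loop
 * from running.
 */
static void example_dump_block_status(BlockDriverState *bs)
{
    int64_t total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector_num = 0;

    while (sector_num < total_sectors) {
        int nb_sectors = MIN(1024, total_sectors - sector_num);
        int pnum;
        int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);

        if (ret < 0 || pnum == 0) {
            break; /* error, or no forward progress possible */
        }
        printf("%" PRId64 "..%" PRId64 ": %s%s\n",
               sector_num, sector_num + pnum,
               (ret & BDRV_BLOCK_DATA) ? "data " : "",
               (ret & BDRV_BLOCK_ZERO) ? "zero" : "");
        sector_num += pnum;
    }
}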
4051
4052 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4053 int nb_sectors, int *pnum)
4054 {
4055 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4056 if (ret < 0) {
4057 return ret;
4058 }
4059 return (ret & BDRV_BLOCK_ALLOCATED);
4060 }
4061
4062 /*
4063 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4064 *
4065 * Return true if the given sector is allocated in any image between
4066 * BASE and TOP (TOP inclusive, BASE exclusive). BASE can be NULL to check
4067 * if the given sector is allocated in any image of the chain; return false otherwise.
4068 *
4069 * 'pnum' is set to the number of sectors (including and immediately following
4070 * the specified sector) that are known to be in the same
4071 * allocated/unallocated state.
4072 *
4073 */
4074 int bdrv_is_allocated_above(BlockDriverState *top,
4075 BlockDriverState *base,
4076 int64_t sector_num,
4077 int nb_sectors, int *pnum)
4078 {
4079 BlockDriverState *intermediate;
4080 int ret, n = nb_sectors;
4081
4082 intermediate = top;
4083 while (intermediate && intermediate != base) {
4084 int pnum_inter;
4085 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4086 &pnum_inter);
4087 if (ret < 0) {
4088 return ret;
4089 } else if (ret) {
4090 *pnum = pnum_inter;
4091 return 1;
4092 }
4093
4094 /*
4095 * [sector_num, nb_sectors] is unallocated on top but intermediate
4096 * might have
4097 *
4098 * [sector_num+x, nb_sectors] allocated.
4099 */
4100 if (n > pnum_inter &&
4101 (intermediate == top ||
4102 sector_num + pnum_inter < intermediate->total_sectors)) {
4103 n = pnum_inter;
4104 }
4105
4106 intermediate = intermediate->backing_hd;
4107 }
4108
4109 *pnum = n;
4110 return 0;
4111 }
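
/* Illustrative sketch (editor's addition): a typical use of
 * bdrv_is_allocated_above() is deciding whether a sector must be copied when
 * committing TOP down into BASE; any sector allocated above BASE carries data
 * that BASE does not have yet. The function name is invented for the example,
 * and errors are conservatively treated as "needs copying".
 */
static bool example_sector_needs_commit(BlockDriverState *top,
                                        BlockDriverState *base,
                                        int64_t sector_num)
{
    int pnum;
    int ret = bdrv_is_allocated_above(top, base, sector_num, 1, &pnum);

    return ret != 0; /* 1 = allocated above base, < 0 = error: copy anyway */
}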
4112
4113 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4114 {
4115 if (bs->backing_hd && bs->backing_hd->encrypted)
4116 return bs->backing_file;
4117 else if (bs->encrypted)
4118 return bs->filename;
4119 else
4120 return NULL;
4121 }
4122
4123 void bdrv_get_backing_filename(BlockDriverState *bs,
4124 char *filename, int filename_size)
4125 {
4126 pstrcpy(filename, filename_size, bs->backing_file);
4127 }
4128
4129 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4130 const uint8_t *buf, int nb_sectors)
4131 {
4132 BlockDriver *drv = bs->drv;
4133 if (!drv)
4134 return -ENOMEDIUM;
4135 if (!drv->bdrv_write_compressed)
4136 return -ENOTSUP;
4137 if (bdrv_check_request(bs, sector_num, nb_sectors))
4138 return -EIO;
4139
4140 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4141
4142 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4143 }
4144
4145 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4146 {
4147 BlockDriver *drv = bs->drv;
4148 if (!drv)
4149 return -ENOMEDIUM;
4150 if (!drv->bdrv_get_info)
4151 return -ENOTSUP;
4152 memset(bdi, 0, sizeof(*bdi));
4153 return drv->bdrv_get_info(bs, bdi);
4154 }
4155
4156 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4157 {
4158 BlockDriver *drv = bs->drv;
4159 if (drv && drv->bdrv_get_specific_info) {
4160 return drv->bdrv_get_specific_info(bs);
4161 }
4162 return NULL;
4163 }
4164
4165 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4166 int64_t pos, int size)
4167 {
4168 QEMUIOVector qiov;
4169 struct iovec iov = {
4170 .iov_base = (void *) buf,
4171 .iov_len = size,
4172 };
4173
4174 qemu_iovec_init_external(&qiov, &iov, 1);
4175 return bdrv_writev_vmstate(bs, &qiov, pos);
4176 }
4177
4178 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4179 {
4180 BlockDriver *drv = bs->drv;
4181
4182 if (!drv) {
4183 return -ENOMEDIUM;
4184 } else if (drv->bdrv_save_vmstate) {
4185 return drv->bdrv_save_vmstate(bs, qiov, pos);
4186 } else if (bs->file) {
4187 return bdrv_writev_vmstate(bs->file, qiov, pos);
4188 }
4189
4190 return -ENOTSUP;
4191 }
4192
4193 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4194 int64_t pos, int size)
4195 {
4196 BlockDriver *drv = bs->drv;
4197 if (!drv)
4198 return -ENOMEDIUM;
4199 if (drv->bdrv_load_vmstate)
4200 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4201 if (bs->file)
4202 return bdrv_load_vmstate(bs->file, buf, pos, size);
4203 return -ENOTSUP;
4204 }
4205
4206 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4207 {
4208 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4209 return;
4210 }
4211
4212 bs->drv->bdrv_debug_event(bs, event);
4213 }
4214
4215 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4216 const char *tag)
4217 {
4218 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4219 bs = bs->file;
4220 }
4221
4222 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4223 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4224 }
4225
4226 return -ENOTSUP;
4227 }
4228
4229 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4230 {
4231 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4232 bs = bs->file;
4233 }
4234
4235 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4236 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4237 }
4238
4239 return -ENOTSUP;
4240 }
4241
4242 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4243 {
4244 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4245 bs = bs->file;
4246 }
4247
4248 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4249 return bs->drv->bdrv_debug_resume(bs, tag);
4250 }
4251
4252 return -ENOTSUP;
4253 }
4254
4255 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4256 {
4257 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4258 bs = bs->file;
4259 }
4260
4261 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4262 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4263 }
4264
4265 return false;
4266 }
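
/* Illustrative sketch (editor's addition): how the debug hooks above are used
 * by tests. This assumes a blkdebug driver somewhere in the chain and that
 * "flush_to_os" is a valid blkdebug event name; the tag "A" and the function
 * name are arbitrary inventions for the example.
 */
static void example_debug_pause_flush(BlockDriverState *bs)
{
    /* suspend the next request that triggers the flush_to_os event */
    if (bdrv_debug_breakpoint(bs, "flush_to_os", "A") < 0) {
        return; /* no driver in this chain implements breakpoints */
    }

    /* ... submit I/O elsewhere; once a request hit the breakpoint ... */
    if (bdrv_debug_is_suspended(bs, "A")) {
        bdrv_debug_resume(bs, "A"); /* let the suspended request continue */
    }
}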
4267
4268 int bdrv_is_snapshot(BlockDriverState *bs)
4269 {
4270 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4271 }
4272
4273 /* backing_file can be relative, absolute, or a protocol. If it is
4274 * relative, it must be relative to the chain. Therefore, bs->filename from
4275 * a BDS should not be passed in as backing_file, since that may be relative
4276 * to the CWD rather than to the chain. */
4277 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4278 const char *backing_file)
4279 {
4280 char *filename_full = NULL;
4281 char *backing_file_full = NULL;
4282 char *filename_tmp = NULL;
4283 int is_protocol = 0;
4284 BlockDriverState *curr_bs = NULL;
4285 BlockDriverState *retval = NULL;
4286
4287 if (!bs || !bs->drv || !backing_file) {
4288 return NULL;
4289 }
4290
4291 filename_full = g_malloc(PATH_MAX);
4292 backing_file_full = g_malloc(PATH_MAX);
4293 filename_tmp = g_malloc(PATH_MAX);
4294
4295 is_protocol = path_has_protocol(backing_file);
4296
4297 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4298
4299 /* If either of the filename paths is actually a protocol, then
4300 * compare unmodified paths; otherwise make paths relative */
4301 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4302 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4303 retval = curr_bs->backing_hd;
4304 break;
4305 }
4306 } else {
4307 /* If not an absolute filename path, make it relative to the current
4308 * image's filename path */
4309 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4310 backing_file);
4311
4312 /* We are going to compare absolute pathnames */
4313 if (!realpath(filename_tmp, filename_full)) {
4314 continue;
4315 }
4316
4317 /* We need to make sure the backing filename we are comparing against
4318 * is relative to the current image filename (or absolute) */
4319 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4320 curr_bs->backing_file);
4321
4322 if (!realpath(filename_tmp, backing_file_full)) {
4323 continue;
4324 }
4325
4326 if (strcmp(backing_file_full, filename_full) == 0) {
4327 retval = curr_bs->backing_hd;
4328 break;
4329 }
4330 }
4331 }
4332
4333 g_free(filename_full);
4334 g_free(backing_file_full);
4335 g_free(filename_tmp);
4336 return retval;
4337 }
4338
4339 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4340 {
4341 if (!bs->drv) {
4342 return 0;
4343 }
4344
4345 if (!bs->backing_hd) {
4346 return 0;
4347 }
4348
4349 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4350 }
4351
4352 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
4353 {
4354 BlockDriverState *curr_bs = NULL;
4355
4356 if (!bs) {
4357 return NULL;
4358 }
4359
4360 curr_bs = bs;
4361
4362 while (curr_bs->backing_hd) {
4363 curr_bs = curr_bs->backing_hd;
4364 }
4365 return curr_bs;
4366 }
4367
4368 /**************************************************************/
4369 /* async I/Os */
4370
4371 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4372 QEMUIOVector *qiov, int nb_sectors,
4373 BlockDriverCompletionFunc *cb, void *opaque)
4374 {
4375 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4376
4377 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4378 cb, opaque, false);
4379 }
4380
4381 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4382 QEMUIOVector *qiov, int nb_sectors,
4383 BlockDriverCompletionFunc *cb, void *opaque)
4384 {
4385 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4386
4387 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4388 cb, opaque, true);
4389 }
4390
4391 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4392 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4393 BlockDriverCompletionFunc *cb, void *opaque)
4394 {
4395 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4396
4397 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4398 BDRV_REQ_ZERO_WRITE | flags,
4399 cb, opaque, true);
4400 }
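
/* Illustrative sketch (editor's addition): the calling convention for the AIO
 * interface above. The qiov (and the memory it points into) must stay valid
 * until the completion callback runs, so both live in one heap allocation
 * here. All example_* names are invented; error handling is minimal.
 */
typedef struct ExampleRead {
    QEMUIOVector qiov;
    struct iovec iov;
    uint8_t *buf;
} ExampleRead;

static void example_read_done(void *opaque, int ret)
{
    ExampleRead *er = opaque;

    if (ret < 0) {
        fprintf(stderr, "example read failed: %d\n", ret);
    }
    qemu_vfree(er->buf);
    g_free(er);
}

static void example_submit_read(BlockDriverState *bs)
{
    ExampleRead *er = g_malloc0(sizeof(*er));

    er->buf = qemu_blockalign(bs, BDRV_SECTOR_SIZE);
    er->iov = (struct iovec) {
        .iov_base = er->buf,
        .iov_len = BDRV_SECTOR_SIZE,
    };
    qemu_iovec_init_external(&er->qiov, &er->iov, 1);

    /* read sector 0; example_read_done() fires from the AioContext */
    bdrv_aio_readv(bs, 0, &er->qiov, 1, example_read_done, er);
}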
4401
4402
4403 typedef struct MultiwriteCB {
4404 int error;
4405 int num_requests;
4406 int num_callbacks;
4407 struct {
4408 BlockDriverCompletionFunc *cb;
4409 void *opaque;
4410 QEMUIOVector *free_qiov;
4411 } callbacks[];
4412 } MultiwriteCB;
4413
4414 static void multiwrite_user_cb(MultiwriteCB *mcb)
4415 {
4416 int i;
4417
4418 for (i = 0; i < mcb->num_callbacks; i++) {
4419 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4420 if (mcb->callbacks[i].free_qiov) {
4421 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4422 }
4423 g_free(mcb->callbacks[i].free_qiov);
4424 }
4425 }
4426
4427 static void multiwrite_cb(void *opaque, int ret)
4428 {
4429 MultiwriteCB *mcb = opaque;
4430
4431 trace_multiwrite_cb(mcb, ret);
4432
4433 if (ret < 0 && !mcb->error) {
4434 mcb->error = ret;
4435 }
4436
4437 mcb->num_requests--;
4438 if (mcb->num_requests == 0) {
4439 multiwrite_user_cb(mcb);
4440 g_free(mcb);
4441 }
4442 }
4443
4444 static int multiwrite_req_compare(const void *a, const void *b)
4445 {
4446 const BlockRequest *req1 = a, *req2 = b;
4447
4448 /*
4449 * Note that we can't simply subtract req2->sector from req1->sector
4450 * here as that could overflow the return value.
4451 */
4452 if (req1->sector > req2->sector) {
4453 return 1;
4454 } else if (req1->sector < req2->sector) {
4455 return -1;
4456 } else {
4457 return 0;
4458 }
4459 }
4460
4461 /*
4462 * Takes a bunch of requests and tries to merge them. Returns the number of
4463 * requests that remain after merging.
4464 */
4465 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4466 int num_reqs, MultiwriteCB *mcb)
4467 {
4468 int i, outidx;
4469
4470 // Sort requests by start sector
4471 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4472
4473 // Check if adjacent requests touch the same clusters. If so, combine them;
4474 // only sequential or overlapping requests are merged, so there are no gaps to fill.
4475 outidx = 0;
4476 for (i = 1; i < num_reqs; i++) {
4477 int merge = 0;
4478 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4479
4480 // Handle exactly sequential writes and overlapping writes.
4481 if (reqs[i].sector <= oldreq_last) {
4482 merge = 1;
4483 }
4484
4485 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4486 merge = 0;
4487 }
4488
4489 if (merge) {
4490 size_t size;
4491 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4492 qemu_iovec_init(qiov,
4493 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4494
4495 // Add the first request to the merged one. If the requests are
4496 // overlapping, drop the last sectors of the first request.
4497 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4498 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4499
4500 // We should not need to add any zeros between the two requests
4501 assert(reqs[i].sector <= oldreq_last);
4502
4503 // Add the second request
4504 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4505
4506 reqs[outidx].nb_sectors = qiov->size >> 9;
4507 reqs[outidx].qiov = qiov;
4508
4509 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4510 } else {
4511 outidx++;
4512 reqs[outidx].sector = reqs[i].sector;
4513 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4514 reqs[outidx].qiov = reqs[i].qiov;
4515 }
4516 }
4517
4518 return outidx + 1;
4519 }
4520
4521 /*
4522 * Submit multiple AIO write requests at once.
4523 *
4524 * On success, the function returns 0 and all requests in the reqs array have
4525 * been submitted. In the error case, this function returns -1 and any of the
4526 * requests may or may not have been submitted yet. In particular, this means
4527 * that the callback will be called for some of the requests but not for others. The
4528 * caller must check the error field of the BlockRequest to wait for the right
4529 * callbacks (if error != 0, no callback will be called).
4530 *
4531 * The implementation may modify the contents of the reqs array, e.g. to merge
4532 * requests. However, the fields opaque and error are left unmodified as they
4533 * are used to signal failure for a single request to the caller.
4534 */
4535 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4536 {
4537 MultiwriteCB *mcb;
4538 int i;
4539
4540 /* don't submit writes if we don't have a medium */
4541 if (bs->drv == NULL) {
4542 for (i = 0; i < num_reqs; i++) {
4543 reqs[i].error = -ENOMEDIUM;
4544 }
4545 return -1;
4546 }
4547
4548 if (num_reqs == 0) {
4549 return 0;
4550 }
4551
4552 // Create MultiwriteCB structure
4553 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4554 mcb->num_requests = 0;
4555 mcb->num_callbacks = num_reqs;
4556
4557 for (i = 0; i < num_reqs; i++) {
4558 mcb->callbacks[i].cb = reqs[i].cb;
4559 mcb->callbacks[i].opaque = reqs[i].opaque;
4560 }
4561
4562 // Check for mergeable requests
4563 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4564
4565 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4566
4567 /* Run the aio requests. */
4568 mcb->num_requests = num_reqs;
4569 for (i = 0; i < num_reqs; i++) {
4570 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4571 reqs[i].nb_sectors, reqs[i].flags,
4572 multiwrite_cb, mcb,
4573 true);
4574 }
4575
4576 return 0;
4577 }
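
/* Illustrative sketch (editor's addition): preparing a batch for
 * bdrv_aio_multiwrite(). The two requests below are exactly sequential, so
 * multiwrite_merge() would combine them into a single driver request. The
 * qiovs and the callback are assumed to be set up by the caller as in the
 * AIO example further up; the function name is invented.
 */
static int example_multiwrite(BlockDriverState *bs,
                              QEMUIOVector *qiov0, QEMUIOVector *qiov1,
                              BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockRequest reqs[2] = {
        { .sector = 0, .nb_sectors = 8, .qiov = qiov0,
          .cb = cb, .opaque = opaque },
        { .sector = 8, .nb_sectors = 8, .qiov = qiov1, /* adjacent: merged */
          .cb = cb, .opaque = opaque },
    };

    /* on -1, check reqs[i].error: requests with error != 0 get no callback */
    return bdrv_aio_multiwrite(bs, reqs, 2);
}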
4578
4579 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
4580 {
4581 acb->aiocb_info->cancel(acb);
4582 }
4583
4584 /**************************************************************/
4585 /* async block device emulation */
4586
4587 typedef struct BlockDriverAIOCBSync {
4588 BlockDriverAIOCB common;
4589 QEMUBH *bh;
4590 int ret;
4591 /* vector translation state */
4592 QEMUIOVector *qiov;
4593 uint8_t *bounce;
4594 int is_write;
4595 } BlockDriverAIOCBSync;
4596
4597 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4598 {
4599 BlockDriverAIOCBSync *acb =
4600 container_of(blockacb, BlockDriverAIOCBSync, common);
4601 qemu_bh_delete(acb->bh);
4602 acb->bh = NULL;
4603 qemu_aio_release(acb);
4604 }
4605
4606 static const AIOCBInfo bdrv_em_aiocb_info = {
4607 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4608 .cancel = bdrv_aio_cancel_em,
4609 };
4610
4611 static void bdrv_aio_bh_cb(void *opaque)
4612 {
4613 BlockDriverAIOCBSync *acb = opaque;
4614
4615 if (!acb->is_write)
4616 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4617 qemu_vfree(acb->bounce);
4618 acb->common.cb(acb->common.opaque, acb->ret);
4619 qemu_bh_delete(acb->bh);
4620 acb->bh = NULL;
4621 qemu_aio_release(acb);
4622 }
4623
4624 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4625 int64_t sector_num,
4626 QEMUIOVector *qiov,
4627 int nb_sectors,
4628 BlockDriverCompletionFunc *cb,
4629 void *opaque,
4630 int is_write)
4631
4632 {
4633 BlockDriverAIOCBSync *acb;
4634
4635 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4636 acb->is_write = is_write;
4637 acb->qiov = qiov;
4638 acb->bounce = qemu_blockalign(bs, qiov->size);
4639 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4640
4641 if (is_write) {
4642 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4643 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4644 } else {
4645 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4646 }
4647
4648 qemu_bh_schedule(acb->bh);
4649
4650 return &acb->common;
4651 }
4652
4653 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4654 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4655 BlockDriverCompletionFunc *cb, void *opaque)
4656 {
4657 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4658 }
4659
4660 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4661 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4662 BlockDriverCompletionFunc *cb, void *opaque)
4663 {
4664 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4665 }
4666
4667
4668 typedef struct BlockDriverAIOCBCoroutine {
4669 BlockDriverAIOCB common;
4670 BlockRequest req;
4671 bool is_write;
4672 bool *done;
4673 QEMUBH* bh;
4674 } BlockDriverAIOCBCoroutine;
4675
4676 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4677 {
4678 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
4679 BlockDriverAIOCBCoroutine *acb =
4680 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4681 bool done = false;
4682
4683 acb->done = &done;
4684 while (!done) {
4685 aio_poll(aio_context, true);
4686 }
4687 }
4688
4689 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4690 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4691 .cancel = bdrv_aio_co_cancel_em,
4692 };
4693
4694 static void bdrv_co_em_bh(void *opaque)
4695 {
4696 BlockDriverAIOCBCoroutine *acb = opaque;
4697
4698 acb->common.cb(acb->common.opaque, acb->req.error);
4699
4700 if (acb->done) {
4701 *acb->done = true;
4702 }
4703
4704 qemu_bh_delete(acb->bh);
4705 qemu_aio_release(acb);
4706 }
4707
4708 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4709 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4710 {
4711 BlockDriverAIOCBCoroutine *acb = opaque;
4712 BlockDriverState *bs = acb->common.bs;
4713
4714 if (!acb->is_write) {
4715 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4716 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4717 } else {
4718 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4719 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4720 }
4721
4722 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4723 qemu_bh_schedule(acb->bh);
4724 }
4725
4726 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4727 int64_t sector_num,
4728 QEMUIOVector *qiov,
4729 int nb_sectors,
4730 BdrvRequestFlags flags,
4731 BlockDriverCompletionFunc *cb,
4732 void *opaque,
4733 bool is_write)
4734 {
4735 Coroutine *co;
4736 BlockDriverAIOCBCoroutine *acb;
4737
4738 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4739 acb->req.sector = sector_num;
4740 acb->req.nb_sectors = nb_sectors;
4741 acb->req.qiov = qiov;
4742 acb->req.flags = flags;
4743 acb->is_write = is_write;
4744 acb->done = NULL;
4745
4746 co = qemu_coroutine_create(bdrv_co_do_rw);
4747 qemu_coroutine_enter(co, acb);
4748
4749 return &acb->common;
4750 }
4751
4752 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4753 {
4754 BlockDriverAIOCBCoroutine *acb = opaque;
4755 BlockDriverState *bs = acb->common.bs;
4756
4757 acb->req.error = bdrv_co_flush(bs);
4758 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4759 qemu_bh_schedule(acb->bh);
4760 }
4761
4762 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4763 BlockDriverCompletionFunc *cb, void *opaque)
4764 {
4765 trace_bdrv_aio_flush(bs, opaque);
4766
4767 Coroutine *co;
4768 BlockDriverAIOCBCoroutine *acb;
4769
4770 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4771 acb->done = NULL;
4772
4773 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4774 qemu_coroutine_enter(co, acb);
4775
4776 return &acb->common;
4777 }
4778
4779 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4780 {
4781 BlockDriverAIOCBCoroutine *acb = opaque;
4782 BlockDriverState *bs = acb->common.bs;
4783
4784 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4785 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4786 qemu_bh_schedule(acb->bh);
4787 }
4788
4789 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4790 int64_t sector_num, int nb_sectors,
4791 BlockDriverCompletionFunc *cb, void *opaque)
4792 {
4793 Coroutine *co;
4794 BlockDriverAIOCBCoroutine *acb;
4795
4796 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4797
4798 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4799 acb->req.sector = sector_num;
4800 acb->req.nb_sectors = nb_sectors;
4801 acb->done = NULL;
4802 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4803 qemu_coroutine_enter(co, acb);
4804
4805 return &acb->common;
4806 }
4807
4808 void bdrv_init(void)
4809 {
4810 module_call_init(MODULE_INIT_BLOCK);
4811 }
4812
4813 void bdrv_init_with_whitelist(void)
4814 {
4815 use_bdrv_whitelist = 1;
4816 bdrv_init();
4817 }
4818
4819 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4820 BlockDriverCompletionFunc *cb, void *opaque)
4821 {
4822 BlockDriverAIOCB *acb;
4823
4824 acb = g_slice_alloc(aiocb_info->aiocb_size);
4825 acb->aiocb_info = aiocb_info;
4826 acb->bs = bs;
4827 acb->cb = cb;
4828 acb->opaque = opaque;
4829 return acb;
4830 }
4831
4832 void qemu_aio_release(void *p)
4833 {
4834 BlockDriverAIOCB *acb = p;
4835 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4836 }
4837
4838 /**************************************************************/
4839 /* Coroutine block device emulation */
4840
4841 typedef struct CoroutineIOCompletion {
4842 Coroutine *coroutine;
4843 int ret;
4844 } CoroutineIOCompletion;
4845
4846 static void bdrv_co_io_em_complete(void *opaque, int ret)
4847 {
4848 CoroutineIOCompletion *co = opaque;
4849
4850 co->ret = ret;
4851 qemu_coroutine_enter(co->coroutine, NULL);
4852 }
4853
4854 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4855 int nb_sectors, QEMUIOVector *iov,
4856 bool is_write)
4857 {
4858 CoroutineIOCompletion co = {
4859 .coroutine = qemu_coroutine_self(),
4860 };
4861 BlockDriverAIOCB *acb;
4862
4863 if (is_write) {
4864 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4865 bdrv_co_io_em_complete, &co);
4866 } else {
4867 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4868 bdrv_co_io_em_complete, &co);
4869 }
4870
4871 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4872 if (!acb) {
4873 return -EIO;
4874 }
4875 qemu_coroutine_yield();
4876
4877 return co.ret;
4878 }
4879
4880 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4881 int64_t sector_num, int nb_sectors,
4882 QEMUIOVector *iov)
4883 {
4884 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4885 }
4886
4887 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4888 int64_t sector_num, int nb_sectors,
4889 QEMUIOVector *iov)
4890 {
4891 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4892 }
4893
4894 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4895 {
4896 RwCo *rwco = opaque;
4897
4898 rwco->ret = bdrv_co_flush(rwco->bs);
4899 }
4900
4901 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4902 {
4903 int ret;
4904
4905 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4906 return 0;
4907 }
4908
4909 /* Write back cached data to the OS even with cache=unsafe */
4910 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4911 if (bs->drv->bdrv_co_flush_to_os) {
4912 ret = bs->drv->bdrv_co_flush_to_os(bs);
4913 if (ret < 0) {
4914 return ret;
4915 }
4916 }
4917
4918 /* But don't actually force it to the disk with cache=unsafe */
4919 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4920 goto flush_parent;
4921 }
4922
4923 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4924 if (bs->drv->bdrv_co_flush_to_disk) {
4925 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4926 } else if (bs->drv->bdrv_aio_flush) {
4927 BlockDriverAIOCB *acb;
4928 CoroutineIOCompletion co = {
4929 .coroutine = qemu_coroutine_self(),
4930 };
4931
4932 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4933 if (acb == NULL) {
4934 ret = -EIO;
4935 } else {
4936 qemu_coroutine_yield();
4937 ret = co.ret;
4938 }
4939 } else {
4940 /*
4941 * Some block drivers always operate in either writethrough or unsafe
4942 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4943 * know how the server works (because the behaviour is hardcoded or
4944 * depends on server-side configuration), so we can't ensure that
4945 * everything is safe on disk. Returning an error doesn't work because
4946 * that would break guests even if the server operates in writethrough
4947 * mode.
4948 *
4949 * Let's hope the user knows what they're doing.
4950 */
4951 ret = 0;
4952 }
4953 if (ret < 0) {
4954 return ret;
4955 }
4956
4957 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4958 * in the case of cache=unsafe, so there are no useless flushes.
4959 */
4960 flush_parent:
4961 return bdrv_co_flush(bs->file);
4962 }
4963
4964 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
4965 {
4966 Error *local_err = NULL;
4967 int ret;
4968
4969 if (!bs->drv) {
4970 return;
4971 }
4972
4973 if (bs->drv->bdrv_invalidate_cache) {
4974 bs->drv->bdrv_invalidate_cache(bs, &local_err);
4975 } else if (bs->file) {
4976 bdrv_invalidate_cache(bs->file, &local_err);
4977 }
4978 if (local_err) {
4979 error_propagate(errp, local_err);
4980 return;
4981 }
4982
4983 ret = refresh_total_sectors(bs, bs->total_sectors);
4984 if (ret < 0) {
4985 error_setg_errno(errp, -ret, "Could not refresh total sector count");
4986 return;
4987 }
4988 }
4989
4990 void bdrv_invalidate_cache_all(Error **errp)
4991 {
4992 BlockDriverState *bs;
4993 Error *local_err = NULL;
4994
4995 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4996 AioContext *aio_context = bdrv_get_aio_context(bs);
4997
4998 aio_context_acquire(aio_context);
4999 bdrv_invalidate_cache(bs, &local_err);
5000 aio_context_release(aio_context);
5001 if (local_err) {
5002 error_propagate(errp, local_err);
5003 return;
5004 }
5005 }
5006 }
5007
5008 void bdrv_clear_incoming_migration_all(void)
5009 {
5010 BlockDriverState *bs;
5011
5012 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5013 AioContext *aio_context = bdrv_get_aio_context(bs);
5014
5015 aio_context_acquire(aio_context);
5016 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
5017 aio_context_release(aio_context);
5018 }
5019 }
5020
5021 int bdrv_flush(BlockDriverState *bs)
5022 {
5023 Coroutine *co;
5024 RwCo rwco = {
5025 .bs = bs,
5026 .ret = NOT_DONE,
5027 };
5028
5029 if (qemu_in_coroutine()) {
5030 /* Fast-path if already in coroutine context */
5031 bdrv_flush_co_entry(&rwco);
5032 } else {
5033 AioContext *aio_context = bdrv_get_aio_context(bs);
5034
5035 co = qemu_coroutine_create(bdrv_flush_co_entry);
5036 qemu_coroutine_enter(co, &rwco);
5037 while (rwco.ret == NOT_DONE) {
5038 aio_poll(aio_context, true);
5039 }
5040 }
5041
5042 return rwco.ret;
5043 }
5044
5045 typedef struct DiscardCo {
5046 BlockDriverState *bs;
5047 int64_t sector_num;
5048 int nb_sectors;
5049 int ret;
5050 } DiscardCo;
5051 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5052 {
5053 DiscardCo *rwco = opaque;
5054
5055 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5056 }
5057
5058 /* If no limit is specified in the BlockLimits, use a default
5059 * of 32768 512-byte sectors (16 MiB) per request.
5060 */
5061 #define MAX_DISCARD_DEFAULT 32768
5062
5063 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5064 int nb_sectors)
5065 {
5066 int max_discard;
5067
5068 if (!bs->drv) {
5069 return -ENOMEDIUM;
5070 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5071 return -EIO;
5072 } else if (bs->read_only) {
5073 return -EROFS;
5074 }
5075
5076 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5077
5078 /* Do nothing if disabled. */
5079 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5080 return 0;
5081 }
5082
5083 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5084 return 0;
5085 }
5086
5087 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5088 while (nb_sectors > 0) {
5089 int ret;
5090 int num = nb_sectors;
5091
5092 /* align request */
5093 if (bs->bl.discard_alignment &&
5094 num >= bs->bl.discard_alignment &&
5095 sector_num % bs->bl.discard_alignment) {
5096 if (num > bs->bl.discard_alignment) {
5097 num = bs->bl.discard_alignment;
5098 }
5099 num -= sector_num % bs->bl.discard_alignment;
5100 }
5101
5102 /* limit request size */
5103 if (num > max_discard) {
5104 num = max_discard;
5105 }
5106
5107 if (bs->drv->bdrv_co_discard) {
5108 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5109 } else {
5110 BlockDriverAIOCB *acb;
5111 CoroutineIOCompletion co = {
5112 .coroutine = qemu_coroutine_self(),
5113 };
5114
5115 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5116 bdrv_co_io_em_complete, &co);
5117 if (acb == NULL) {
5118 return -EIO;
5119 } else {
5120 qemu_coroutine_yield();
5121 ret = co.ret;
5122 }
5123 }
5124 if (ret && ret != -ENOTSUP) {
5125 return ret;
5126 }
5127
5128 sector_num += num;
5129 nb_sectors -= num;
5130 }
5131 return 0;
5132 }
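
/* Editor's worked example (not part of the original file) for the alignment
 * logic in the loop above: with bl.discard_alignment = 8, sector_num = 5 and
 * nb_sectors = 100, num is first clamped to 8 and then reduced by 5 % 8, so
 * the first chunk discards just 3 sectors, [5, 8), leaving every following
 * chunk aligned. The helper replicates the first iteration only.
 */
static int example_first_discard_chunk(BlockDriverState *bs,
                                       int64_t sector_num, int nb_sectors)
{
    int num = nb_sectors;

    if (bs->bl.discard_alignment &&
        num >= bs->bl.discard_alignment &&
        sector_num % bs->bl.discard_alignment) {
        if (num > bs->bl.discard_alignment) {
            num = bs->bl.discard_alignment;
        }
        num -= sector_num % bs->bl.discard_alignment;
    }
    return num; /* 3 in the example above */
}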
5133
5134 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5135 {
5136 Coroutine *co;
5137 DiscardCo rwco = {
5138 .bs = bs,
5139 .sector_num = sector_num,
5140 .nb_sectors = nb_sectors,
5141 .ret = NOT_DONE,
5142 };
5143
5144 if (qemu_in_coroutine()) {
5145 /* Fast-path if already in coroutine context */
5146 bdrv_discard_co_entry(&rwco);
5147 } else {
5148 AioContext *aio_context = bdrv_get_aio_context(bs);
5149
5150 co = qemu_coroutine_create(bdrv_discard_co_entry);
5151 qemu_coroutine_enter(co, &rwco);
5152 while (rwco.ret == NOT_DONE) {
5153 aio_poll(aio_context, true);
5154 }
5155 }
5156
5157 return rwco.ret;
5158 }
5159
5160 /**************************************************************/
5161 /* removable device support */
5162
5163 /**
5164 * Return TRUE if the media is present
5165 */
5166 int bdrv_is_inserted(BlockDriverState *bs)
5167 {
5168 BlockDriver *drv = bs->drv;
5169
5170 if (!drv)
5171 return 0;
5172 if (!drv->bdrv_is_inserted)
5173 return 1;
5174 return drv->bdrv_is_inserted(bs);
5175 }
5176
5177 /**
5178 * Return whether the media changed since the last call to this
5179 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5180 */
5181 int bdrv_media_changed(BlockDriverState *bs)
5182 {
5183 BlockDriver *drv = bs->drv;
5184
5185 if (drv && drv->bdrv_media_changed) {
5186 return drv->bdrv_media_changed(bs);
5187 }
5188 return -ENOTSUP;
5189 }
5190
5191 /**
5192 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
5193 */
5194 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5195 {
5196 BlockDriver *drv = bs->drv;
5197
5198 if (drv && drv->bdrv_eject) {
5199 drv->bdrv_eject(bs, eject_flag);
5200 }
5201
5202 if (bs->device_name[0] != '\0') {
5203 bdrv_emit_qmp_eject_event(bs, eject_flag);
5204 }
5205 }
5206
5207 /**
5208 * Lock or unlock the media (if it is locked, the user won't be able
5209 * to eject it manually).
5210 */
5211 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5212 {
5213 BlockDriver *drv = bs->drv;
5214
5215 trace_bdrv_lock_medium(bs, locked);
5216
5217 if (drv && drv->bdrv_lock_medium) {
5218 drv->bdrv_lock_medium(bs, locked);
5219 }
5220 }
5221
5222 /* needed for generic scsi interface */
5223
5224 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5225 {
5226 BlockDriver *drv = bs->drv;
5227
5228 if (drv && drv->bdrv_ioctl)
5229 return drv->bdrv_ioctl(bs, req, buf);
5230 return -ENOTSUP;
5231 }
5232
5233 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5234 unsigned long int req, void *buf,
5235 BlockDriverCompletionFunc *cb, void *opaque)
5236 {
5237 BlockDriver *drv = bs->drv;
5238
5239 if (drv && drv->bdrv_aio_ioctl)
5240 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5241 return NULL;
5242 }
5243
5244 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5245 {
5246 bs->guest_block_size = align;
5247 }
5248
5249 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5250 {
5251 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5252 }
5253
5254 /*
5255 * Check if all memory in this vector is aligned to the driver's memory alignment (bdrv_opt_mem_align).
5256 */
5257 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5258 {
5259 int i;
5260 size_t alignment = bdrv_opt_mem_align(bs);
5261
5262 for (i = 0; i < qiov->niov; i++) {
5263 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5264 return false;
5265 }
5266 if (qiov->iov[i].iov_len % alignment) {
5267 return false;
5268 }
5269 }
5270
5271 return true;
5272 }
5273
5274 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5275 Error **errp)
5276 {
5277 int64_t bitmap_size;
5278 BdrvDirtyBitmap *bitmap;
5279
5280 assert((granularity & (granularity - 1)) == 0);
5281
5282 granularity >>= BDRV_SECTOR_BITS;
5283 assert(granularity);
5284 bitmap_size = bdrv_getlength(bs);
5285 if (bitmap_size < 0) {
5286 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5287 errno = -bitmap_size;
5288 return NULL;
5289 }
5290 bitmap_size >>= BDRV_SECTOR_BITS;
5291 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5292 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5293 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5294 return bitmap;
5295 }
5296
5297 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5298 {
5299 BdrvDirtyBitmap *bm, *next;
5300 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5301 if (bm == bitmap) {
5302 QLIST_REMOVE(bitmap, list);
5303 hbitmap_free(bitmap->bitmap);
5304 g_free(bitmap);
5305 return;
5306 }
5307 }
5308 }
5309
5310 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5311 {
5312 BdrvDirtyBitmap *bm;
5313 BlockDirtyInfoList *list = NULL;
5314 BlockDirtyInfoList **plist = &list;
5315
5316 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5317 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5318 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5319 info->count = bdrv_get_dirty_count(bs, bm);
5320 info->granularity =
5321 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5322 entry->value = info;
5323 *plist = entry;
5324 plist = &entry->next;
5325 }
5326
5327 return list;
5328 }
5329
5330 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5331 {
5332 if (bitmap) {
5333 return hbitmap_get(bitmap->bitmap, sector);
5334 } else {
5335 return 0;
5336 }
5337 }
5338
5339 void bdrv_dirty_iter_init(BlockDriverState *bs,
5340 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5341 {
5342 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5343 }
5344
5345 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5346 int nr_sectors)
5347 {
5348 BdrvDirtyBitmap *bitmap;
5349 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5350 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5351 }
5352 }
5353
5354 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5355 {
5356 BdrvDirtyBitmap *bitmap;
5357 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5358 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5359 }
5360 }
5361
5362 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5363 {
5364 return hbitmap_count(bitmap->bitmap);
5365 }
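
/* Illustrative sketch (editor's addition): the typical dirty-bitmap lifecycle
 * using the helpers above. With a 64 KiB granularity one bit covers 128
 * sectors. The function name is invented and error handling is minimal.
 */
static void example_track_writes(BlockDriverState *bs)
{
    Error *local_err = NULL;
    BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536, &local_err);

    if (!bitmap) {
        error_free(local_err);
        return;
    }

    bdrv_set_dirty(bs, 0, 128); /* mark sectors [0, 128) in all bitmaps */
    if (bdrv_get_dirty(bs, bitmap, 0)) {
        printf("%" PRId64 " sectors dirty\n",
               bdrv_get_dirty_count(bs, bitmap));
    }

    bdrv_release_dirty_bitmap(bs, bitmap);
}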
5366
5367 /* Get a reference to bs */
5368 void bdrv_ref(BlockDriverState *bs)
5369 {
5370 bs->refcnt++;
5371 }
5372
5373 /* Release a previously grabbed reference to bs.
5374 * If after releasing, reference count is zero, the BlockDriverState is
5375 * deleted. */
5376 void bdrv_unref(BlockDriverState *bs)
5377 {
5378 assert(bs->refcnt > 0);
5379 if (--bs->refcnt == 0) {
5380 bdrv_delete(bs);
5381 }
5382 }
5383
5384 struct BdrvOpBlocker {
5385 Error *reason;
5386 QLIST_ENTRY(BdrvOpBlocker) list;
5387 };
5388
5389 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5390 {
5391 BdrvOpBlocker *blocker;
5392 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5393 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5394 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5395 if (errp) {
5396 error_setg(errp, "Device '%s' is busy: %s",
5397 bs->device_name, error_get_pretty(blocker->reason));
5398 }
5399 return true;
5400 }
5401 return false;
5402 }
5403
5404 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5405 {
5406 BdrvOpBlocker *blocker;
5407 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5408
5409 blocker = g_malloc0(sizeof(BdrvOpBlocker));
5410 blocker->reason = reason;
5411 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5412 }
5413
5414 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5415 {
5416 BdrvOpBlocker *blocker, *next;
5417 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5418 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5419 if (blocker->reason == reason) {
5420 QLIST_REMOVE(blocker, list);
5421 g_free(blocker);
5422 }
5423 }
5424 }
5425
5426 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5427 {
5428 int i;
5429 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5430 bdrv_op_block(bs, i, reason);
5431 }
5432 }
5433
5434 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5435 {
5436 int i;
5437 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5438 bdrv_op_unblock(bs, i, reason);
5439 }
5440 }
5441
5442 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5443 {
5444 int i;
5445
5446 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5447 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5448 return false;
5449 }
5450 }
5451 return true;
5452 }
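
/* Illustrative sketch (editor's addition): how a long-running job would use
 * the op-blocker API above. The reason Error identifies the blocker, so the
 * same pointer must be passed to unblock. BLOCK_OP_TYPE_RESIZE is assumed to
 * be one of the BlockOpType values; the function name is invented.
 */
static void example_block_while_working(BlockDriverState *bs)
{
    Error *reason = NULL;
    Error *local_err = NULL;

    error_setg(&reason, "device is in use by example job");
    bdrv_op_block_all(bs, reason);

    /* other users now get "Device '...' is busy: ..." */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, &local_err)) {
        error_free(local_err);
    }

    /* ... do the actual work ... */

    bdrv_op_unblock_all(bs, reason);
    error_free(reason);
}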
5453
5454 void bdrv_iostatus_enable(BlockDriverState *bs)
5455 {
5456 bs->iostatus_enabled = true;
5457 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5458 }
5459
5460 /* The I/O status is only enabled if the drive explicitly
5461 * enables it _and_ the VM is configured to stop on errors */
5462 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5463 {
5464 return (bs->iostatus_enabled &&
5465 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5466 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5467 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5468 }
5469
5470 void bdrv_iostatus_disable(BlockDriverState *bs)
5471 {
5472 bs->iostatus_enabled = false;
5473 }
5474
5475 void bdrv_iostatus_reset(BlockDriverState *bs)
5476 {
5477 if (bdrv_iostatus_is_enabled(bs)) {
5478 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5479 if (bs->job) {
5480 block_job_iostatus_reset(bs->job);
5481 }
5482 }
5483 }
5484
5485 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5486 {
5487 assert(bdrv_iostatus_is_enabled(bs));
5488 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5489 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5490 BLOCK_DEVICE_IO_STATUS_FAILED;
5491 }
5492 }
5493
5494 void
5495 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5496 enum BlockAcctType type)
5497 {
5498 assert(type < BDRV_MAX_IOTYPE);
5499
5500 cookie->bytes = bytes;
5501 cookie->start_time_ns = get_clock();
5502 cookie->type = type;
5503 }
5504
5505 void
5506 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5507 {
5508 assert(cookie->type < BDRV_MAX_IOTYPE);
5509
5510 bs->nr_bytes[cookie->type] += cookie->bytes;
5511 bs->nr_ops[cookie->type]++;
5512 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
5513 }
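
/* Illustrative sketch (editor's addition): bracketing a synchronous one-sector
 * read with the accounting helpers above so it shows up in query-blockstats.
 * Only completed I/O is accounted, hence the ret check; 'buf' must hold at
 * least BDRV_SECTOR_SIZE bytes, and BDRV_ACCT_READ is assumed to be one of
 * the BlockAcctType values. The function name is invented.
 */
static int example_accounted_read(BlockDriverState *bs, uint8_t *buf)
{
    BlockAcctCookie cookie;
    int ret;

    bdrv_acct_start(bs, &cookie, BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
    ret = bdrv_read(bs, 0, buf, 1);
    if (ret >= 0) {
        bdrv_acct_done(bs, &cookie);
    }
    return ret;
}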
5514
5515 void bdrv_img_create(const char *filename, const char *fmt,
5516 const char *base_filename, const char *base_fmt,
5517 char *options, uint64_t img_size, int flags,
5518 Error **errp, bool quiet)
5519 {
5520 QEMUOptionParameter *param = NULL, *create_options = NULL;
5521 QEMUOptionParameter *backing_fmt, *backing_file, *size;
5522 BlockDriver *drv, *proto_drv;
5523 BlockDriver *backing_drv = NULL;
5524 Error *local_err = NULL;
5525 int ret = 0;
5526
5527 /* Find driver and parse its options */
5528 drv = bdrv_find_format(fmt);
5529 if (!drv) {
5530 error_setg(errp, "Unknown file format '%s'", fmt);
5531 return;
5532 }
5533
5534 proto_drv = bdrv_find_protocol(filename, true);
5535 if (!proto_drv) {
5536 error_setg(errp, "Unknown protocol '%s'", filename);
5537 return;
5538 }
5539
5540 create_options = append_option_parameters(create_options,
5541 drv->create_options);
5542 create_options = append_option_parameters(create_options,
5543 proto_drv->create_options);
5544
5545 /* Create parameter list with default values */
5546 param = parse_option_parameters("", create_options, param);
5547
5548 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
5549
5550 /* Parse -o options */
5551 if (options) {
5552 param = parse_option_parameters(options, create_options, param);
5553 if (param == NULL) {
5554 error_setg(errp, "Invalid options for file format '%s'.", fmt);
5555 goto out;
5556 }
5557 }
5558
5559 if (base_filename) {
5560 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
5561 base_filename)) {
5562 error_setg(errp, "Backing file not supported for file format '%s'",
5563 fmt);
5564 goto out;
5565 }
5566 }
5567
5568 if (base_fmt) {
5569 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5570 error_setg(errp, "Backing file format not supported for file "
5571 "format '%s'", fmt);
5572 goto out;
5573 }
5574 }
5575
5576 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
5577 if (backing_file && backing_file->value.s) {
5578 if (!strcmp(filename, backing_file->value.s)) {
5579 error_setg(errp, "Error: Trying to create an image with the "
5580 "same filename as the backing file");
5581 goto out;
5582 }
5583 }
5584
5585 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
5586 if (backing_fmt && backing_fmt->value.s) {
5587 backing_drv = bdrv_find_format(backing_fmt->value.s);
5588 if (!backing_drv) {
5589 error_setg(errp, "Unknown backing file format '%s'",
5590 backing_fmt->value.s);
5591 goto out;
5592 }
5593 }
5594
5595 // The size for the image must always be specified, with one exception:
5596 // If we are using a backing file, we can obtain the size from there
5597 size = get_option_parameter(param, BLOCK_OPT_SIZE);
5598 if (size && size->value.n == -1) {
5599 if (backing_file && backing_file->value.s) {
5600 BlockDriverState *bs;
5601 uint64_t size;
5602 char buf[32];
5603 int back_flags;
5604
5605 /* backing files are always opened read-only */
5606 back_flags =
5607 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5608
5609 bs = NULL;
5610 ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
5611 backing_drv, &local_err);
5612 if (ret < 0) {
5613 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5614 backing_file->value.s,
5615 error_get_pretty(local_err));
5616 error_free(local_err);
5617 local_err = NULL;
5618 goto out;
5619 }
5620 bdrv_get_geometry(bs, &size);
5621 size *= 512;
5622
5623 snprintf(buf, sizeof(buf), "%" PRId64, size);
5624 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
5625
5626 bdrv_unref(bs);
5627 } else {
5628 error_setg(errp, "Image creation needs a size parameter");
5629 goto out;
5630 }
5631 }
5632
5633 if (!quiet) {
5634 printf("Formatting '%s', fmt=%s ", filename, fmt);
5635 print_option_parameters(param);
5636 puts("");
5637 }
5638 ret = bdrv_create(drv, filename, param, &local_err);
5639 if (ret == -EFBIG) {
5640 /* This is generally a better message than whatever the driver would
5641 * deliver (especially because of the cluster_size_hint), since that
5642 * is most probably not much different from "image too large". */
5643 const char *cluster_size_hint = "";
5644 if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
5645 cluster_size_hint = " (try using a larger cluster size)";
5646 }
5647 error_setg(errp, "The image size is too large for file format '%s'"
5648 "%s", fmt, cluster_size_hint);
5649 error_free(local_err);
5650 local_err = NULL;
5651 }
5652
5653 out:
5654 free_option_parameters(create_options);
5655 free_option_parameters(param);
5656
5657 if (local_err) {
5658 error_propagate(errp, local_err);
5659 }
5660 }
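
/* Illustrative sketch (editor's addition): programmatic image creation, the
 * same path that "qemu-img create -f qcow2 -o cluster_size=65536" goes
 * through. The filename is a placeholder and the function name is invented;
 * errors come back through errp.
 */
static void example_create_image(Error **errp)
{
    char options[] = "cluster_size=65536";

    bdrv_img_create("/tmp/example.qcow2", "qcow2",
                    NULL, NULL,            /* no backing file or format */
                    options,
                    64 * 1024 * 1024,      /* 64 MiB virtual size */
                    0,                     /* flags */
                    errp, true);           /* quiet */
}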
5661
5662 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5663 {
5664 /* Currently BlockDriverState always uses the main loop AioContext */
5665 return qemu_get_aio_context();
5666 }
5667
5668 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5669 NotifierWithReturn *notifier)
5670 {
5671 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5672 }
5673
5674 int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
5675 {
5676 if (bs->drv->bdrv_amend_options == NULL) {
5677 return -ENOTSUP;
5678 }
5679 return bs->drv->bdrv_amend_options(bs, options);
5680 }
5681
5682 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5683 * of block filters and by bdrv_is_first_non_filter.
5684 * It is used to test whether the given bs is the candidate, or to recurse
5685 * further down the node graph.
5686 */
5687 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5688 BlockDriverState *candidate)
5689 {
5690 /* return false if basic checks fail */
5691 if (!bs || !bs->drv) {
5692 return false;
5693 }
5694
5695 /* the code reached a non-filter driver -> check if the bs is
5696 * the same as the candidate; this is the recursion termination condition.
5697 */
5698 if (!bs->drv->is_filter) {
5699 return bs == candidate;
5700 }
5701 /* Down this path the driver is a block filter driver */
5702
5703 /* If the block filter recursion method is defined use it to recurse down
5704 * the node graph.
5705 */
5706 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5707 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5708 }
5709
5710 /* the driver is a block filter but does not allow recursion -> return false
5711 */
5712 return false;
5713 }
5714
5715 /* This function checks whether the candidate is the first non-filter bs down its
5716 * bs chain. Since we don't have pointers to parents, it explores all bs chains
5717 * from the top. Some filters can choose not to pass down the recursion.
5718 */
5719 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5720 {
5721 BlockDriverState *bs;
5722
5723 /* walk down the bs forest recursively */
5724 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5725 bool perm;
5726
5727 /* try to recurse in this top level bs */
5728 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5729
5730 /* candidate is the first non filter */
5731 if (perm) {
5732 return true;
5733 }
5734 }
5735
5736 return false;
5737 }