block: add .bdrv_co_is_allocated()
1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor.h"
28 #include "block_int.h"
29 #include "module.h"
30 #include "qjson.h"
31 #include "qemu-coroutine.h"
32 #include "qmp-commands.h"
33 #include "qemu-timer.h"
34
35 #ifdef CONFIG_BSD
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/ioctl.h>
39 #include <sys/queue.h>
40 #ifndef __DragonFly__
41 #include <sys/disk.h>
42 #endif
43 #endif
44
45 #ifdef _WIN32
46 #include <windows.h>
47 #endif
48
49 #define NOT_DONE 0x7fffffff /* used while an emulated synchronous operation is in progress */
50
51 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
52 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
53 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
54 BlockDriverCompletionFunc *cb, void *opaque);
55 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
56 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
57 BlockDriverCompletionFunc *cb, void *opaque);
58 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
59 int64_t sector_num, int nb_sectors,
60 QEMUIOVector *iov);
61 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
62 int64_t sector_num, int nb_sectors,
63 QEMUIOVector *iov);
64 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
65 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
66 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
67 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
68 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
69 int64_t sector_num,
70 QEMUIOVector *qiov,
71 int nb_sectors,
72 BlockDriverCompletionFunc *cb,
73 void *opaque,
74 bool is_write);
75 static void coroutine_fn bdrv_co_do_rw(void *opaque);
76
77 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
78 bool is_write, double elapsed_time, uint64_t *wait);
79 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
80 double elapsed_time, uint64_t *wait);
81 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
82 bool is_write, int64_t *wait);
83
84 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
85 QTAILQ_HEAD_INITIALIZER(bdrv_states);
86
87 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
88 QLIST_HEAD_INITIALIZER(bdrv_drivers);
89
90 /* The device to use for VM snapshots */
91 static BlockDriverState *bs_snapshots;
92
93 /* If non-zero, use only whitelisted block drivers */
94 static int use_bdrv_whitelist;
95
96 #ifdef _WIN32
97 static int is_windows_drive_prefix(const char *filename)
98 {
99 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
100 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
101 filename[1] == ':');
102 }
103
104 int is_windows_drive(const char *filename)
105 {
106 if (is_windows_drive_prefix(filename) &&
107 filename[2] == '\0')
108 return 1;
109 if (strstart(filename, "\\\\.\\", NULL) ||
110 strstart(filename, "//./", NULL))
111 return 1;
112 return 0;
113 }
114 #endif
115
116 /* throttling disk I/O limits */
117 void bdrv_io_limits_disable(BlockDriverState *bs)
118 {
119 bs->io_limits_enabled = false;
120
121 while (qemu_co_queue_next(&bs->throttled_reqs));
122
123 if (bs->block_timer) {
124 qemu_del_timer(bs->block_timer);
125 qemu_free_timer(bs->block_timer);
126 bs->block_timer = NULL;
127 }
128
129 bs->slice_start = 0;
130 bs->slice_end = 0;
131 bs->slice_time = 0;
132 memset(&bs->io_base, 0, sizeof(bs->io_base));
133 }
134
135 static void bdrv_block_timer(void *opaque)
136 {
137 BlockDriverState *bs = opaque;
138
139 qemu_co_queue_next(&bs->throttled_reqs);
140 }
141
142 void bdrv_io_limits_enable(BlockDriverState *bs)
143 {
144 qemu_co_queue_init(&bs->throttled_reqs);
145 bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
146 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
147 bs->slice_start = qemu_get_clock_ns(vm_clock);
148 bs->slice_end = bs->slice_start + bs->slice_time;
149 memset(&bs->io_base, 0, sizeof(bs->io_base));
150 bs->io_limits_enabled = true;
151 }
152
153 bool bdrv_io_limits_enabled(BlockDriverState *bs)
154 {
155 BlockIOLimit *io_limits = &bs->io_limits;
156 return io_limits->bps[BLOCK_IO_LIMIT_READ]
157 || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
158 || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
159 || io_limits->iops[BLOCK_IO_LIMIT_READ]
160 || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
161 || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
162 }
163
164 static void bdrv_io_limits_intercept(BlockDriverState *bs,
165 bool is_write, int nb_sectors)
166 {
167 int64_t wait_time = -1;
168
169 if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
170 qemu_co_queue_wait(&bs->throttled_reqs);
171 }
172
173 /* We aim to preserve each request's ordering (FIFO). The next
174 * throttled request is not dequeued until the current request is
175 * allowed to be serviced, so if the current request still exceeds
176 * the limits it is re-inserted at the head of the queue and all
177 * requests behind it remain in the throttled_reqs queue.
178 */
179
180 while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
181 qemu_mod_timer(bs->block_timer,
182 wait_time + qemu_get_clock_ns(vm_clock));
183 qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
184 }
185
186 qemu_co_queue_next(&bs->throttled_reqs);
187 }
188
189 /* check if the path starts with "<protocol>:" */
190 static int path_has_protocol(const char *path)
191 {
192 #ifdef _WIN32
193 if (is_windows_drive(path) ||
194 is_windows_drive_prefix(path)) {
195 return 0;
196 }
197 #endif
198
199 return strchr(path, ':') != NULL;
200 }
201
202 int path_is_absolute(const char *path)
203 {
204 const char *p;
205 #ifdef _WIN32
206 /* specific case for names like: "\\.\d:" */
207 if (*path == '/' || *path == '\\')
208 return 1;
209 #endif
210 p = strchr(path, ':');
211 if (p)
212 p++;
213 else
214 p = path;
215 #ifdef _WIN32
216 return (*p == '/' || *p == '\\');
217 #else
218 return (*p == '/');
219 #endif
220 }
221
222 /* if filename is absolute, just copy it to dest. Otherwise, build a
223 path to it by treating it as relative to base_path. URLs are
224 supported. */
225 void path_combine(char *dest, int dest_size,
226 const char *base_path,
227 const char *filename)
228 {
229 const char *p, *p1;
230 int len;
231
232 if (dest_size <= 0)
233 return;
234 if (path_is_absolute(filename)) {
235 pstrcpy(dest, dest_size, filename);
236 } else {
237 p = strchr(base_path, ':');
238 if (p)
239 p++;
240 else
241 p = base_path;
242 p1 = strrchr(base_path, '/');
243 #ifdef _WIN32
244 {
245 const char *p2;
246 p2 = strrchr(base_path, '\\');
247 if (!p1 || p2 > p1)
248 p1 = p2;
249 }
250 #endif
251 if (p1)
252 p1++;
253 else
254 p1 = base_path;
255 if (p1 > p)
256 p = p1;
257 len = p - base_path;
258 if (len > dest_size - 1)
259 len = dest_size - 1;
260 memcpy(dest, base_path, len);
261 dest[len] = '\0';
262 pstrcat(dest, dest_size, filename);
263 }
264 }
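
/*
 * Illustrative sketch of path_combine() semantics (the paths here are
 * hypothetical):
 *
 *   char dest[PATH_MAX];
 *   path_combine(dest, sizeof(dest), "/images/vm.qcow2", "base.qcow2");
 *      -> dest == "/images/base.qcow2"
 *   path_combine(dest, sizeof(dest), "/images/vm.qcow2", "/abs/base.qcow2");
 *      -> dest == "/abs/base.qcow2"  (absolute names are copied verbatim)
 */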
265
266 void bdrv_register(BlockDriver *bdrv)
267 {
268 /* Block drivers without coroutine functions need emulation */
269 if (!bdrv->bdrv_co_readv) {
270 bdrv->bdrv_co_readv = bdrv_co_readv_em;
271 bdrv->bdrv_co_writev = bdrv_co_writev_em;
272
273 /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
274 * the block driver lacks aio we need to emulate that too.
275 */
276 if (!bdrv->bdrv_aio_readv) {
277 /* add AIO emulation layer */
278 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
279 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
280 }
281 }
282
283 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
284 }
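
/*
 * Illustrative sketch: the minimal shape of a driver that relies on the
 * emulation wiring in bdrv_register() above. The driver itself is
 * hypothetical; only fields referenced elsewhere in this file are shown.
 */
static BlockDriver bdrv_example = {
    .format_name   = "example",
    .instance_size = 0,
    /* With no bdrv_co_readv and no bdrv_aio_readv set, bdrv_register()
     * fills in bdrv_co_readv_em()/bdrv_co_writev_em() plus the AIO
     * emulation layer for us. */
};

static void example_register(void)
{
    bdrv_register(&bdrv_example);
}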
285
286 /* create a new block device (by default it is empty) */
287 BlockDriverState *bdrv_new(const char *device_name)
288 {
289 BlockDriverState *bs;
290
291 bs = g_malloc0(sizeof(BlockDriverState));
292 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
293 if (device_name[0] != '\0') {
294 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
295 }
296 bdrv_iostatus_disable(bs);
297 return bs;
298 }
299
300 BlockDriver *bdrv_find_format(const char *format_name)
301 {
302 BlockDriver *drv1;
303 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
304 if (!strcmp(drv1->format_name, format_name)) {
305 return drv1;
306 }
307 }
308 return NULL;
309 }
310
311 static int bdrv_is_whitelisted(BlockDriver *drv)
312 {
313 static const char *whitelist[] = {
314 CONFIG_BDRV_WHITELIST
315 };
316 const char **p;
317
318 if (!whitelist[0])
319 return 1; /* no whitelist, anything goes */
320
321 for (p = whitelist; *p; p++) {
322 if (!strcmp(drv->format_name, *p)) {
323 return 1;
324 }
325 }
326 return 0;
327 }
328
329 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
330 {
331 BlockDriver *drv = bdrv_find_format(format_name);
332 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
333 }
334
335 int bdrv_create(BlockDriver *drv, const char* filename,
336 QEMUOptionParameter *options)
337 {
338 if (!drv->bdrv_create)
339 return -ENOTSUP;
340
341 return drv->bdrv_create(filename, options);
342 }
343
344 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
345 {
346 BlockDriver *drv;
347
348 drv = bdrv_find_protocol(filename);
349 if (drv == NULL) {
350 return -ENOENT;
351 }
352
353 return bdrv_create(drv, filename, options);
354 }
355
356 #ifdef _WIN32
357 void get_tmp_filename(char *filename, int size)
358 {
359 char temp_dir[MAX_PATH];
360
361 GetTempPath(MAX_PATH, temp_dir);
362 GetTempFileName(temp_dir, "qem", 0, filename);
363 }
364 #else
365 void get_tmp_filename(char *filename, int size)
366 {
367 int fd;
368 const char *tmpdir;
369 /* XXX: race condition possible */
370 tmpdir = getenv("TMPDIR");
371 if (!tmpdir)
372 tmpdir = "/tmp";
373 snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
374 fd = mkstemp(filename);
375 close(fd);
376 }
377 #endif
378
379 /*
380 * Detect host devices. By convention, /dev/cdrom[N] is always
381 * recognized as a host CDROM.
382 */
383 static BlockDriver *find_hdev_driver(const char *filename)
384 {
385 int score_max = 0, score;
386 BlockDriver *drv = NULL, *d;
387
388 QLIST_FOREACH(d, &bdrv_drivers, list) {
389 if (d->bdrv_probe_device) {
390 score = d->bdrv_probe_device(filename);
391 if (score > score_max) {
392 score_max = score;
393 drv = d;
394 }
395 }
396 }
397
398 return drv;
399 }
400
401 BlockDriver *bdrv_find_protocol(const char *filename)
402 {
403 BlockDriver *drv1;
404 char protocol[128];
405 int len;
406 const char *p;
407
408 /* TODO Drivers without bdrv_file_open must be specified explicitly */
409
410 /*
411 * XXX(hch): we really should not let host device detection
412 * override an explicit protocol specification, but moving this
413 * later breaks access to device names with colons in them.
414 * Thanks to the brain-dead persistent naming schemes on udev-
415 * based Linux systems those actually are quite common.
416 */
417 drv1 = find_hdev_driver(filename);
418 if (drv1) {
419 return drv1;
420 }
421
422 if (!path_has_protocol(filename)) {
423 return bdrv_find_format("file");
424 }
425 p = strchr(filename, ':');
426 assert(p != NULL);
427 len = p - filename;
428 if (len > sizeof(protocol) - 1)
429 len = sizeof(protocol) - 1;
430 memcpy(protocol, filename, len);
431 protocol[len] = '\0';
432 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
433 if (drv1->protocol_name &&
434 !strcmp(drv1->protocol_name, protocol)) {
435 return drv1;
436 }
437 }
438 return NULL;
439 }
440
441 static int find_image_format(const char *filename, BlockDriver **pdrv)
442 {
443 int ret, score, score_max;
444 BlockDriver *drv1, *drv;
445 uint8_t buf[2048];
446 BlockDriverState *bs;
447
448 ret = bdrv_file_open(&bs, filename, 0);
449 if (ret < 0) {
450 *pdrv = NULL;
451 return ret;
452 }
453
454 /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
455 if (bs->sg || !bdrv_is_inserted(bs)) {
456 bdrv_delete(bs);
457 drv = bdrv_find_format("raw");
458 if (!drv) {
459 ret = -ENOENT;
460 }
461 *pdrv = drv;
462 return ret;
463 }
464
465 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
466 bdrv_delete(bs);
467 if (ret < 0) {
468 *pdrv = NULL;
469 return ret;
470 }
471
472 score_max = 0;
473 drv = NULL;
474 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
475 if (drv1->bdrv_probe) {
476 score = drv1->bdrv_probe(buf, ret, filename);
477 if (score > score_max) {
478 score_max = score;
479 drv = drv1;
480 }
481 }
482 }
483 if (!drv) {
484 ret = -ENOENT;
485 }
486 *pdrv = drv;
487 return ret;
488 }
489
490 /**
491 * Set the current 'total_sectors' value
492 */
493 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
494 {
495 BlockDriver *drv = bs->drv;
496
497 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
498 if (bs->sg)
499 return 0;
500
501 /* query actual device if possible, otherwise just trust the hint */
502 if (drv->bdrv_getlength) {
503 int64_t length = drv->bdrv_getlength(bs);
504 if (length < 0) {
505 return length;
506 }
507 hint = length >> BDRV_SECTOR_BITS;
508 }
509
510 bs->total_sectors = hint;
511 return 0;
512 }
513
514 /**
515 * Set open flags for a given cache mode
516 *
517 * Return 0 on success, -1 if the cache mode was invalid.
518 */
519 int bdrv_parse_cache_flags(const char *mode, int *flags)
520 {
521 *flags &= ~BDRV_O_CACHE_MASK;
522
523 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
524 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
525 } else if (!strcmp(mode, "directsync")) {
526 *flags |= BDRV_O_NOCACHE;
527 } else if (!strcmp(mode, "writeback")) {
528 *flags |= BDRV_O_CACHE_WB;
529 } else if (!strcmp(mode, "unsafe")) {
530 *flags |= BDRV_O_CACHE_WB;
531 *flags |= BDRV_O_NO_FLUSH;
532 } else if (!strcmp(mode, "writethrough")) {
533 /* this is the default */
534 } else {
535 return -1;
536 }
537
538 return 0;
539 }
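
/*
 * Illustrative sketch: mapping a "cache=" mode string onto open flags with
 * bdrv_parse_cache_flags() above (the mode string is an example).
 */
static int example_cache_flags(void)
{
    int flags = 0;

    if (bdrv_parse_cache_flags("none", &flags) < 0) {
        return -1;
    }
    /* "none" requests O_DIRECT-style access with a writeback guest view,
     * so flags now contain BDRV_O_NOCACHE | BDRV_O_CACHE_WB. */
    return flags;
}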
540
541 /*
542 * Common part for opening disk images and files
543 */
544 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
545 int flags, BlockDriver *drv)
546 {
547 int ret, open_flags;
548
549 assert(drv != NULL);
550
551 trace_bdrv_open_common(bs, filename, flags, drv->format_name);
552
553 bs->file = NULL;
554 bs->total_sectors = 0;
555 bs->encrypted = 0;
556 bs->valid_key = 0;
557 bs->sg = 0;
558 bs->open_flags = flags;
559 bs->growable = 0;
560 bs->buffer_alignment = 512;
561
562 pstrcpy(bs->filename, sizeof(bs->filename), filename);
563 bs->backing_file[0] = '\0';
564
565 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
566 return -ENOTSUP;
567 }
568
569 bs->drv = drv;
570 bs->opaque = g_malloc0(drv->instance_size);
571
572 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
573
574 /*
575 * Clear flags that are internal to the block layer before opening the
576 * image.
577 */
578 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
579
580 /*
581 * Snapshots should be writable.
582 */
583 if (bs->is_temporary) {
584 open_flags |= BDRV_O_RDWR;
585 }
586
587 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
588
589 /* Open the image, either directly or using a protocol */
590 if (drv->bdrv_file_open) {
591 ret = drv->bdrv_file_open(bs, filename, open_flags);
592 } else {
593 ret = bdrv_file_open(&bs->file, filename, open_flags);
594 if (ret >= 0) {
595 ret = drv->bdrv_open(bs, open_flags);
596 }
597 }
598
599 if (ret < 0) {
600 goto free_and_fail;
601 }
602
603 ret = refresh_total_sectors(bs, bs->total_sectors);
604 if (ret < 0) {
605 goto free_and_fail;
606 }
607
608 #ifndef _WIN32
609 if (bs->is_temporary) {
610 unlink(filename);
611 }
612 #endif
613 return 0;
614
615 free_and_fail:
616 if (bs->file) {
617 bdrv_delete(bs->file);
618 bs->file = NULL;
619 }
620 g_free(bs->opaque);
621 bs->opaque = NULL;
622 bs->drv = NULL;
623 return ret;
624 }
625
626 /*
627 * Opens a file using a protocol (file, host_device, nbd, ...)
628 */
629 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
630 {
631 BlockDriverState *bs;
632 BlockDriver *drv;
633 int ret;
634
635 drv = bdrv_find_protocol(filename);
636 if (!drv) {
637 return -ENOENT;
638 }
639
640 bs = bdrv_new("");
641 ret = bdrv_open_common(bs, filename, flags, drv);
642 if (ret < 0) {
643 bdrv_delete(bs);
644 return ret;
645 }
646 bs->growable = 1;
647 *pbs = bs;
648 return 0;
649 }
650
651 /*
652 * Opens a disk image (raw, qcow2, vmdk, ...)
653 */
654 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
655 BlockDriver *drv)
656 {
657 int ret;
658 char tmp_filename[PATH_MAX];
659
660 if (flags & BDRV_O_SNAPSHOT) {
661 BlockDriverState *bs1;
662 int64_t total_size;
663 int is_protocol = 0;
664 BlockDriver *bdrv_qcow2;
665 QEMUOptionParameter *options;
666 char backing_filename[PATH_MAX];
667
668 /* if snapshot, we create a temporary backing file and open it
669 instead of opening 'filename' directly */
670
671 /* if there is a backing file, use it */
672 bs1 = bdrv_new("");
673 ret = bdrv_open(bs1, filename, 0, drv);
674 if (ret < 0) {
675 bdrv_delete(bs1);
676 return ret;
677 }
678 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
679
680 if (bs1->drv && bs1->drv->protocol_name)
681 is_protocol = 1;
682
683 bdrv_delete(bs1);
684
685 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
686
687 /* Real path is meaningless for protocols */
688 if (is_protocol)
689 snprintf(backing_filename, sizeof(backing_filename),
690 "%s", filename);
691 else if (!realpath(filename, backing_filename))
692 return -errno;
693
694 bdrv_qcow2 = bdrv_find_format("qcow2");
695 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
696
697 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
698 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
699 if (drv) {
700 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
701 drv->format_name);
702 }
703
704 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
705 free_option_parameters(options);
706 if (ret < 0) {
707 return ret;
708 }
709
710 filename = tmp_filename;
711 drv = bdrv_qcow2;
712 bs->is_temporary = 1;
713 }
714
715 /* Find the right image format driver */
716 if (!drv) {
717 ret = find_image_format(filename, &drv);
718 }
719
720 if (!drv) {
721 goto unlink_and_fail;
722 }
723
724 /* Open the image */
725 ret = bdrv_open_common(bs, filename, flags, drv);
726 if (ret < 0) {
727 goto unlink_and_fail;
728 }
729
730 /* If there is a backing file, use it */
731 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
732 char backing_filename[PATH_MAX];
733 int back_flags;
734 BlockDriver *back_drv = NULL;
735
736 bs->backing_hd = bdrv_new("");
737
738 if (path_has_protocol(bs->backing_file)) {
739 pstrcpy(backing_filename, sizeof(backing_filename),
740 bs->backing_file);
741 } else {
742 path_combine(backing_filename, sizeof(backing_filename),
743 filename, bs->backing_file);
744 }
745
746 if (bs->backing_format[0] != '\0') {
747 back_drv = bdrv_find_format(bs->backing_format);
748 }
749
750 /* backing files always opened read-only */
751 back_flags =
752 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
753
754 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
755 if (ret < 0) {
756 bdrv_close(bs);
757 return ret;
758 }
759 if (bs->is_temporary) {
760 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
761 } else {
762 /* base image inherits from "parent" */
763 bs->backing_hd->keep_read_only = bs->keep_read_only;
764 }
765 }
766
767 if (!bdrv_key_required(bs)) {
768 bdrv_dev_change_media_cb(bs, true);
769 }
770
771 /* throttling disk I/O limits */
772 if (bs->io_limits_enabled) {
773 bdrv_io_limits_enable(bs);
774 }
775
776 return 0;
777
778 unlink_and_fail:
779 if (bs->is_temporary) {
780 unlink(filename);
781 }
782 return ret;
783 }
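
/*
 * Illustrative sketch of the usual open sequence built on bdrv_new() and
 * bdrv_open() above (device name and path are hypothetical).
 */
static int example_open_image(BlockDriverState **pbs)
{
    BlockDriverState *bs = bdrv_new("disk0");
    int ret;

    /* Passing a NULL driver lets bdrv_open() probe the image format
     * through find_image_format(). */
    ret = bdrv_open(bs, "/tmp/disk.qcow2", BDRV_O_RDWR, NULL);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    *pbs = bs;
    return 0;
}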
784
785 void bdrv_close(BlockDriverState *bs)
786 {
787 if (bs->drv) {
788 if (bs == bs_snapshots) {
789 bs_snapshots = NULL;
790 }
791 if (bs->backing_hd) {
792 bdrv_delete(bs->backing_hd);
793 bs->backing_hd = NULL;
794 }
795 bs->drv->bdrv_close(bs);
796 g_free(bs->opaque);
797 #ifdef _WIN32
798 if (bs->is_temporary) {
799 unlink(bs->filename);
800 }
801 #endif
802 bs->opaque = NULL;
803 bs->drv = NULL;
804
805 if (bs->file != NULL) {
806 bdrv_close(bs->file);
807 }
808
809 bdrv_dev_change_media_cb(bs, false);
810 }
811
812 /*throttling disk I/O limits*/
813 if (bs->io_limits_enabled) {
814 bdrv_io_limits_disable(bs);
815 }
816 }
817
818 void bdrv_close_all(void)
819 {
820 BlockDriverState *bs;
821
822 QTAILQ_FOREACH(bs, &bdrv_states, list) {
823 bdrv_close(bs);
824 }
825 }
826
827 /* make a BlockDriverState anonymous by removing it from the bdrv_states
828 list. Also, NUL-terminate device_name to prevent a double remove */
829 void bdrv_make_anon(BlockDriverState *bs)
830 {
831 if (bs->device_name[0] != '\0') {
832 QTAILQ_REMOVE(&bdrv_states, bs, list);
833 }
834 bs->device_name[0] = '\0';
835 }
836
837 void bdrv_delete(BlockDriverState *bs)
838 {
839 assert(!bs->dev);
840
841 /* remove from list, if necessary */
842 bdrv_make_anon(bs);
843
844 bdrv_close(bs);
845 if (bs->file != NULL) {
846 bdrv_delete(bs->file);
847 }
848
849 assert(bs != bs_snapshots);
850 g_free(bs);
851 }
852
853 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
854 /* TODO change to DeviceState *dev when all users are qdevified */
855 {
856 if (bs->dev) {
857 return -EBUSY;
858 }
859 bs->dev = dev;
860 bdrv_iostatus_reset(bs);
861 return 0;
862 }
863
864 /* TODO qdevified devices don't use this, remove when devices are qdevified */
865 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
866 {
867 if (bdrv_attach_dev(bs, dev) < 0) {
868 abort();
869 }
870 }
871
872 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
873 /* TODO change to DeviceState *dev when all users are qdevified */
874 {
875 assert(bs->dev == dev);
876 bs->dev = NULL;
877 bs->dev_ops = NULL;
878 bs->dev_opaque = NULL;
879 bs->buffer_alignment = 512;
880 }
881
882 /* TODO change to return DeviceState * when all users are qdevified */
883 void *bdrv_get_attached_dev(BlockDriverState *bs)
884 {
885 return bs->dev;
886 }
887
888 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
889 void *opaque)
890 {
891 bs->dev_ops = ops;
892 bs->dev_opaque = opaque;
893 if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
894 bs_snapshots = NULL;
895 }
896 }
897
898 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
899 {
900 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
901 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
902 }
903 }
904
905 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
906 {
907 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
908 }
909
910 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
911 {
912 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
913 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
914 }
915 }
916
917 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
918 {
919 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
920 return bs->dev_ops->is_tray_open(bs->dev_opaque);
921 }
922 return false;
923 }
924
925 static void bdrv_dev_resize_cb(BlockDriverState *bs)
926 {
927 if (bs->dev_ops && bs->dev_ops->resize_cb) {
928 bs->dev_ops->resize_cb(bs->dev_opaque);
929 }
930 }
931
932 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
933 {
934 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
935 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
936 }
937 return false;
938 }
939
940 /*
941 * Run consistency checks on an image
942 *
943 * Returns 0 if the check could be completed (it doesn't mean that the image is
944 * free of errors) or -errno when an internal error occurred. The results of the
945 * check are stored in res.
946 */
947 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
948 {
949 if (bs->drv->bdrv_check == NULL) {
950 return -ENOTSUP;
951 }
952
953 memset(res, 0, sizeof(*res));
954 return bs->drv->bdrv_check(bs, res);
955 }
956
957 #define COMMIT_BUF_SECTORS 2048
958
959 /* commit COW file into the raw image */
960 int bdrv_commit(BlockDriverState *bs)
961 {
962 BlockDriver *drv = bs->drv;
963 BlockDriver *backing_drv;
964 int64_t sector, total_sectors;
965 int n, ro, open_flags;
966 int ret = 0, rw_ret = 0;
967 uint8_t *buf;
968 char filename[1024];
969 BlockDriverState *bs_rw, *bs_ro;
970
971 if (!drv)
972 return -ENOMEDIUM;
973
974 if (!bs->backing_hd) {
975 return -ENOTSUP;
976 }
977
978 if (bs->backing_hd->keep_read_only) {
979 return -EACCES;
980 }
981
982 backing_drv = bs->backing_hd->drv;
983 ro = bs->backing_hd->read_only;
984 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
985 open_flags = bs->backing_hd->open_flags;
986
987 if (ro) {
988 /* re-open as RW */
989 bdrv_delete(bs->backing_hd);
990 bs->backing_hd = NULL;
991 bs_rw = bdrv_new("");
992 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
993 backing_drv);
994 if (rw_ret < 0) {
995 bdrv_delete(bs_rw);
996 /* try to re-open read-only */
997 bs_ro = bdrv_new("");
998 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
999 backing_drv);
1000 if (ret < 0) {
1001 bdrv_delete(bs_ro);
1002 /* drive not functional anymore */
1003 bs->drv = NULL;
1004 return ret;
1005 }
1006 bs->backing_hd = bs_ro;
1007 return rw_ret;
1008 }
1009 bs->backing_hd = bs_rw;
1010 }
1011
1012 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
1013 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
1014
1015 for (sector = 0; sector < total_sectors; sector += n) {
1016 if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
1017
1018 if (bdrv_read(bs, sector, buf, n) != 0) {
1019 ret = -EIO;
1020 goto ro_cleanup;
1021 }
1022
1023 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
1024 ret = -EIO;
1025 goto ro_cleanup;
1026 }
1027 }
1028 }
1029
1030 if (drv->bdrv_make_empty) {
1031 ret = drv->bdrv_make_empty(bs);
1032 bdrv_flush(bs);
1033 }
1034
1035 /*
1036 * Make sure all data we wrote to the backing device is actually
1037 * stable on disk.
1038 */
1039 if (bs->backing_hd)
1040 bdrv_flush(bs->backing_hd);
1041
1042 ro_cleanup:
1043 g_free(buf);
1044
1045 if (ro) {
1046 /* re-open as RO */
1047 bdrv_delete(bs->backing_hd);
1048 bs->backing_hd = NULL;
1049 bs_ro = bdrv_new("");
1050 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1051 backing_drv);
1052 if (ret < 0) {
1053 bdrv_delete(bs_ro);
1054 /* drive not functional anymore */
1055 bs->drv = NULL;
1056 return ret;
1057 }
1058 bs->backing_hd = bs_ro;
1059 bs->backing_hd->keep_read_only = 0;
1060 }
1061
1062 return ret;
1063 }
1064
1065 void bdrv_commit_all(void)
1066 {
1067 BlockDriverState *bs;
1068
1069 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1070 bdrv_commit(bs);
1071 }
1072 }
1073
1074 /*
1075 * Return values:
1076 * 0 - success
1077 * -EINVAL - backing format specified, but no file
1078 * -ENOSPC - can't update the backing file because no space is left in the
1079 * image file header
1080 * -ENOTSUP - format driver doesn't support changing the backing file
1081 */
1082 int bdrv_change_backing_file(BlockDriverState *bs,
1083 const char *backing_file, const char *backing_fmt)
1084 {
1085 BlockDriver *drv = bs->drv;
1086
1087 if (drv->bdrv_change_backing_file != NULL) {
1088 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
1089 } else {
1090 return -ENOTSUP;
1091 }
1092 }
1093
1094 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
1095 size_t size)
1096 {
1097 int64_t len;
1098
1099 if (!bdrv_is_inserted(bs))
1100 return -ENOMEDIUM;
1101
1102 if (bs->growable)
1103 return 0;
1104
1105 len = bdrv_getlength(bs);
1106
1107 if (offset < 0)
1108 return -EIO;
1109
1110 if ((offset > len) || (len - offset < size))
1111 return -EIO;
1112
1113 return 0;
1114 }
1115
1116 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
1117 int nb_sectors)
1118 {
1119 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
1120 nb_sectors * BDRV_SECTOR_SIZE);
1121 }
1122
1123 typedef struct RwCo {
1124 BlockDriverState *bs;
1125 int64_t sector_num;
1126 int nb_sectors;
1127 QEMUIOVector *qiov;
1128 bool is_write;
1129 int ret;
1130 } RwCo;
1131
1132 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
1133 {
1134 RwCo *rwco = opaque;
1135
1136 if (!rwco->is_write) {
1137 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
1138 rwco->nb_sectors, rwco->qiov);
1139 } else {
1140 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
1141 rwco->nb_sectors, rwco->qiov);
1142 }
1143 }
1144
1145 /*
1146 * Process a synchronous request using coroutines
1147 */
1148 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
1149 int nb_sectors, bool is_write)
1150 {
1151 QEMUIOVector qiov;
1152 struct iovec iov = {
1153 .iov_base = (void *)buf,
1154 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1155 };
1156 Coroutine *co;
1157 RwCo rwco = {
1158 .bs = bs,
1159 .sector_num = sector_num,
1160 .nb_sectors = nb_sectors,
1161 .qiov = &qiov,
1162 .is_write = is_write,
1163 .ret = NOT_DONE,
1164 };
1165
1166 qemu_iovec_init_external(&qiov, &iov, 1);
1167
1168 if (qemu_in_coroutine()) {
1169 /* Fast-path if already in coroutine context */
1170 bdrv_rw_co_entry(&rwco);
1171 } else {
1172 co = qemu_coroutine_create(bdrv_rw_co_entry);
1173 qemu_coroutine_enter(co, &rwco);
1174 while (rwco.ret == NOT_DONE) {
1175 qemu_aio_wait();
1176 }
1177 }
1178 return rwco.ret;
1179 }
1180
1181 /* return < 0 if error. See bdrv_write() for the return codes */
1182 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
1183 uint8_t *buf, int nb_sectors)
1184 {
1185 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
1186 }
1187
1188 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
1189 int nb_sectors, int dirty)
1190 {
1191 int64_t start, end;
1192 unsigned long val, idx, bit;
1193
1194 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
1195 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
1196
1197 for (; start <= end; start++) {
1198 idx = start / (sizeof(unsigned long) * 8);
1199 bit = start % (sizeof(unsigned long) * 8);
1200 val = bs->dirty_bitmap[idx];
1201 if (dirty) {
1202 if (!(val & (1UL << bit))) {
1203 bs->dirty_count++;
1204 val |= 1UL << bit;
1205 }
1206 } else {
1207 if (val & (1UL << bit)) {
1208 bs->dirty_count--;
1209 val &= ~(1UL << bit);
1210 }
1211 }
1212 bs->dirty_bitmap[idx] = val;
1213 }
1214 }
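
/*
 * Worked example for the bitmap math above, assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK is 2048 (1 MB chunks of 512-byte sectors)
 * and 64-bit longs: dirtying sectors 4096..6143 gives start == end == 2,
 * so idx == 2 / 64 == 0 and bit == 2, i.e. bit 2 of the first bitmap word.
 */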
1215
1216 /* Return < 0 if error. Important errors are:
1217 -EIO generic I/O error (may happen for all errors)
1218 -ENOMEDIUM No media inserted.
1219 -EINVAL Invalid sector number or nb_sectors
1220 -EACCES Trying to write a read-only device
1221 */
1222 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
1223 const uint8_t *buf, int nb_sectors)
1224 {
1225 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
1226 }
1227
1228 int bdrv_pread(BlockDriverState *bs, int64_t offset,
1229 void *buf, int count1)
1230 {
1231 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1232 int len, nb_sectors, count;
1233 int64_t sector_num;
1234 int ret;
1235
1236 count = count1;
1237 /* first read to align to sector start */
1238 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1239 if (len > count)
1240 len = count;
1241 sector_num = offset >> BDRV_SECTOR_BITS;
1242 if (len > 0) {
1243 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1244 return ret;
1245 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1246 count -= len;
1247 if (count == 0)
1248 return count1;
1249 sector_num++;
1250 buf += len;
1251 }
1252
1253 /* read the sectors "in place" */
1254 nb_sectors = count >> BDRV_SECTOR_BITS;
1255 if (nb_sectors > 0) {
1256 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1257 return ret;
1258 sector_num += nb_sectors;
1259 len = nb_sectors << BDRV_SECTOR_BITS;
1260 buf += len;
1261 count -= len;
1262 }
1263
1264 /* add data from the last sector */
1265 if (count > 0) {
1266 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1267 return ret;
1268 memcpy(buf, tmp_buf, count);
1269 }
1270 return count1;
1271 }
1272
1273 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1274 const void *buf, int count1)
1275 {
1276 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1277 int len, nb_sectors, count;
1278 int64_t sector_num;
1279 int ret;
1280
1281 count = count1;
1282 /* first write to align to sector start */
1283 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1284 if (len > count)
1285 len = count;
1286 sector_num = offset >> BDRV_SECTOR_BITS;
1287 if (len > 0) {
1288 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1289 return ret;
1290 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1291 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1292 return ret;
1293 count -= len;
1294 if (count == 0)
1295 return count1;
1296 sector_num++;
1297 buf += len;
1298 }
1299
1300 /* write the sectors "in place" */
1301 nb_sectors = count >> BDRV_SECTOR_BITS;
1302 if (nb_sectors > 0) {
1303 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1304 return ret;
1305 sector_num += nb_sectors;
1306 len = nb_sectors << BDRV_SECTOR_BITS;
1307 buf += len;
1308 count -= len;
1309 }
1310
1311 /* add data from the last sector */
1312 if (count > 0) {
1313 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1314 return ret;
1315 memcpy(tmp_buf, buf, count);
1316 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1317 return ret;
1318 }
1319 return count1;
1320 }
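
/*
 * Illustrative sketch: byte-granularity access through the sector-based
 * path above. bdrv_pread() bounces the unaligned head and tail through a
 * temporary sector, so arbitrary offsets work; the offset here (the MBR
 * signature) is a hypothetical example.
 */
static int example_check_mbr_magic(BlockDriverState *bs)
{
    uint8_t magic[2];
    int ret;

    ret = bdrv_pread(bs, 510, magic, sizeof(magic));
    if (ret < 0) {
        return ret;
    }
    return (magic[0] == 0x55 && magic[1] == 0xaa) ? 0 : -EINVAL;
}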
1321
1322 /*
1323 * Writes to the file and ensures that no writes are reordered across this
1324 * request (acts as a barrier)
1325 *
1326 * Returns 0 on success, -errno in error cases.
1327 */
1328 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1329 const void *buf, int count)
1330 {
1331 int ret;
1332
1333 ret = bdrv_pwrite(bs, offset, buf, count);
1334 if (ret < 0) {
1335 return ret;
1336 }
1337
1338 /* No flush needed for cache modes that use O_DSYNC */
1339 if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
1340 bdrv_flush(bs);
1341 }
1342
1343 return 0;
1344 }
1345
1346 /*
1347 * Handle a read request in coroutine context
1348 */
1349 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1350 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1351 {
1352 BlockDriver *drv = bs->drv;
1353
1354 if (!drv) {
1355 return -ENOMEDIUM;
1356 }
1357 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1358 return -EIO;
1359 }
1360
1361 /* throttling disk read I/O */
1362 if (bs->io_limits_enabled) {
1363 bdrv_io_limits_intercept(bs, false, nb_sectors);
1364 }
1365
1366 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1367 }
1368
1369 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1370 int nb_sectors, QEMUIOVector *qiov)
1371 {
1372 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1373
1374 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
1375 }
1376
1377 /*
1378 * Handle a write request in coroutine context
1379 */
1380 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1381 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1382 {
1383 BlockDriver *drv = bs->drv;
1384 int ret;
1385
1386 if (!bs->drv) {
1387 return -ENOMEDIUM;
1388 }
1389 if (bs->read_only) {
1390 return -EACCES;
1391 }
1392 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1393 return -EIO;
1394 }
1395
1396 /* throttling disk write I/O */
1397 if (bs->io_limits_enabled) {
1398 bdrv_io_limits_intercept(bs, true, nb_sectors);
1399 }
1400
1401 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1402
1403 if (bs->dirty_bitmap) {
1404 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1405 }
1406
1407 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
1408 bs->wr_highest_sector = sector_num + nb_sectors - 1;
1409 }
1410
1411 return ret;
1412 }
1413
1414 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1415 int nb_sectors, QEMUIOVector *qiov)
1416 {
1417 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1418
1419 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov);
1420 }
1421
1422 /**
1423 * Truncate file to 'offset' bytes (needed only for file protocols)
1424 */
1425 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1426 {
1427 BlockDriver *drv = bs->drv;
1428 int ret;
1429 if (!drv)
1430 return -ENOMEDIUM;
1431 if (!drv->bdrv_truncate)
1432 return -ENOTSUP;
1433 if (bs->read_only)
1434 return -EACCES;
1435 if (bdrv_in_use(bs))
1436 return -EBUSY;
1437 ret = drv->bdrv_truncate(bs, offset);
1438 if (ret == 0) {
1439 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1440 bdrv_dev_resize_cb(bs);
1441 }
1442 return ret;
1443 }
1444
1445 /**
1446 * Length of an allocated file in bytes. Sparse files are counted by their
1447 * actual allocated space. Return < 0 on error or if unknown.
1448 */
1449 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
1450 {
1451 BlockDriver *drv = bs->drv;
1452 if (!drv) {
1453 return -ENOMEDIUM;
1454 }
1455 if (drv->bdrv_get_allocated_file_size) {
1456 return drv->bdrv_get_allocated_file_size(bs);
1457 }
1458 if (bs->file) {
1459 return bdrv_get_allocated_file_size(bs->file);
1460 }
1461 return -ENOTSUP;
1462 }
1463
1464 /**
1465 * Length of a file in bytes. Return < 0 on error or if unknown.
1466 */
1467 int64_t bdrv_getlength(BlockDriverState *bs)
1468 {
1469 BlockDriver *drv = bs->drv;
1470 if (!drv)
1471 return -ENOMEDIUM;
1472
1473 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
1474 if (drv->bdrv_getlength) {
1475 return drv->bdrv_getlength(bs);
1476 }
1477 }
1478 return bs->total_sectors * BDRV_SECTOR_SIZE;
1479 }
1480
1481 /* return 0 as number of sectors if no device present or error */
1482 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1483 {
1484 int64_t length;
1485 length = bdrv_getlength(bs);
1486 if (length < 0)
1487 length = 0;
1488 else
1489 length = length >> BDRV_SECTOR_BITS;
1490 *nb_sectors_ptr = length;
1491 }
1492
1493 struct partition {
1494 uint8_t boot_ind; /* 0x80 - active */
1495 uint8_t head; /* starting head */
1496 uint8_t sector; /* starting sector */
1497 uint8_t cyl; /* starting cylinder */
1498 uint8_t sys_ind; /* What partition type */
1499 uint8_t end_head; /* end head */
1500 uint8_t end_sector; /* end sector */
1501 uint8_t end_cyl; /* end cylinder */
1502 uint32_t start_sect; /* starting sector counting from 0 */
1503 uint32_t nr_sects; /* nr of sectors in partition */
1504 } QEMU_PACKED;
1505
1506 /* try to guess the disk logical geometry from the MS-DOS partition table. Return 0 if OK, -1 if it could not be guessed */
1507 static int guess_disk_lchs(BlockDriverState *bs,
1508 int *pcylinders, int *pheads, int *psectors)
1509 {
1510 uint8_t buf[BDRV_SECTOR_SIZE];
1511 int ret, i, heads, sectors, cylinders;
1512 struct partition *p;
1513 uint32_t nr_sects;
1514 uint64_t nb_sectors;
1515
1516 bdrv_get_geometry(bs, &nb_sectors);
1517
1518 ret = bdrv_read(bs, 0, buf, 1);
1519 if (ret < 0)
1520 return -1;
1521 /* test msdos magic */
1522 if (buf[510] != 0x55 || buf[511] != 0xaa)
1523 return -1;
1524 for(i = 0; i < 4; i++) {
1525 p = ((struct partition *)(buf + 0x1be)) + i;
1526 nr_sects = le32_to_cpu(p->nr_sects);
1527 if (nr_sects && p->end_head) {
1528 /* We make the assumption that the partition terminates on
1529 a cylinder boundary */
1530 heads = p->end_head + 1;
1531 sectors = p->end_sector & 63;
1532 if (sectors == 0)
1533 continue;
1534 cylinders = nb_sectors / (heads * sectors);
1535 if (cylinders < 1 || cylinders > 16383)
1536 continue;
1537 *pheads = heads;
1538 *psectors = sectors;
1539 *pcylinders = cylinders;
1540 #if 0
1541 printf("guessed geometry: LCHS=%d %d %d\n",
1542 cylinders, heads, sectors);
1543 #endif
1544 return 0;
1545 }
1546 }
1547 return -1;
1548 }
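
/*
 * Worked example for the guess above: on a 1008000-sector image whose
 * first partition entry ends at head 15, sector 63, we derive heads = 16,
 * sectors = 63 and cylinders = 1008000 / (16 * 63) = 1000, which passes
 * the 1..16383 sanity check and becomes the reported logical geometry.
 */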
1549
1550 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1551 {
1552 int translation, lba_detected = 0;
1553 int cylinders, heads, secs;
1554 uint64_t nb_sectors;
1555
1556 /* if a geometry hint is available, use it */
1557 bdrv_get_geometry(bs, &nb_sectors);
1558 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1559 translation = bdrv_get_translation_hint(bs);
1560 if (cylinders != 0) {
1561 *pcyls = cylinders;
1562 *pheads = heads;
1563 *psecs = secs;
1564 } else {
1565 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1566 if (heads > 16) {
1567 /* if heads > 16, it means that a BIOS LBA
1568 translation was active, so the default
1569 hardware geometry is OK */
1570 lba_detected = 1;
1571 goto default_geometry;
1572 } else {
1573 *pcyls = cylinders;
1574 *pheads = heads;
1575 *psecs = secs;
1576 /* disable any translation to be in sync with
1577 the logical geometry */
1578 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1579 bdrv_set_translation_hint(bs,
1580 BIOS_ATA_TRANSLATION_NONE);
1581 }
1582 }
1583 } else {
1584 default_geometry:
1585 /* if no geometry, use a standard physical disk geometry */
1586 cylinders = nb_sectors / (16 * 63);
1587
1588 if (cylinders > 16383)
1589 cylinders = 16383;
1590 else if (cylinders < 2)
1591 cylinders = 2;
1592 *pcyls = cylinders;
1593 *pheads = 16;
1594 *psecs = 63;
1595 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1596 if ((*pcyls * *pheads) <= 131072) {
1597 bdrv_set_translation_hint(bs,
1598 BIOS_ATA_TRANSLATION_LARGE);
1599 } else {
1600 bdrv_set_translation_hint(bs,
1601 BIOS_ATA_TRANSLATION_LBA);
1602 }
1603 }
1604 }
1605 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1606 }
1607 }
1608
1609 void bdrv_set_geometry_hint(BlockDriverState *bs,
1610 int cyls, int heads, int secs)
1611 {
1612 bs->cyls = cyls;
1613 bs->heads = heads;
1614 bs->secs = secs;
1615 }
1616
1617 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1618 {
1619 bs->translation = translation;
1620 }
1621
1622 void bdrv_get_geometry_hint(BlockDriverState *bs,
1623 int *pcyls, int *pheads, int *psecs)
1624 {
1625 *pcyls = bs->cyls;
1626 *pheads = bs->heads;
1627 *psecs = bs->secs;
1628 }
1629
1630 /* throttling disk io limits */
1631 void bdrv_set_io_limits(BlockDriverState *bs,
1632 BlockIOLimit *io_limits)
1633 {
1634 bs->io_limits = *io_limits;
1635 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
1636 }
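
/*
 * Illustrative sketch: capping a device at 10 MB/s and 100 IOPS total,
 * using the BlockIOLimit slots that qmp_query_block() below reads back.
 */
static void example_throttle_device(BlockDriverState *bs)
{
    BlockIOLimit limits;

    memset(&limits, 0, sizeof(limits));
    limits.bps[BLOCK_IO_LIMIT_TOTAL]  = 10 * 1024 * 1024; /* bytes/s */
    limits.iops[BLOCK_IO_LIMIT_TOTAL] = 100;              /* ops/s */

    /* Throttling is switched on because at least one limit is non-zero
     * (see bdrv_io_limits_enabled() above). */
    bdrv_set_io_limits(bs, &limits);
}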
1637
1638 /* Recognize floppy formats */
1639 typedef struct FDFormat {
1640 FDriveType drive;
1641 uint8_t last_sect;
1642 uint8_t max_track;
1643 uint8_t max_head;
1644 } FDFormat;
1645
1646 static const FDFormat fd_formats[] = {
1647 /* First entry is default format */
1648 /* 1.44 MB 3"1/2 floppy disks */
1649 { FDRIVE_DRV_144, 18, 80, 1, },
1650 { FDRIVE_DRV_144, 20, 80, 1, },
1651 { FDRIVE_DRV_144, 21, 80, 1, },
1652 { FDRIVE_DRV_144, 21, 82, 1, },
1653 { FDRIVE_DRV_144, 21, 83, 1, },
1654 { FDRIVE_DRV_144, 22, 80, 1, },
1655 { FDRIVE_DRV_144, 23, 80, 1, },
1656 { FDRIVE_DRV_144, 24, 80, 1, },
1657 /* 2.88 MB 3"1/2 floppy disks */
1658 { FDRIVE_DRV_288, 36, 80, 1, },
1659 { FDRIVE_DRV_288, 39, 80, 1, },
1660 { FDRIVE_DRV_288, 40, 80, 1, },
1661 { FDRIVE_DRV_288, 44, 80, 1, },
1662 { FDRIVE_DRV_288, 48, 80, 1, },
1663 /* 720 kB 3"1/2 floppy disks */
1664 { FDRIVE_DRV_144, 9, 80, 1, },
1665 { FDRIVE_DRV_144, 10, 80, 1, },
1666 { FDRIVE_DRV_144, 10, 82, 1, },
1667 { FDRIVE_DRV_144, 10, 83, 1, },
1668 { FDRIVE_DRV_144, 13, 80, 1, },
1669 { FDRIVE_DRV_144, 14, 80, 1, },
1670 /* 1.2 MB 5"1/4 floppy disks */
1671 { FDRIVE_DRV_120, 15, 80, 1, },
1672 { FDRIVE_DRV_120, 18, 80, 1, },
1673 { FDRIVE_DRV_120, 18, 82, 1, },
1674 { FDRIVE_DRV_120, 18, 83, 1, },
1675 { FDRIVE_DRV_120, 20, 80, 1, },
1676 /* 720 kB 5"1/4 floppy disks */
1677 { FDRIVE_DRV_120, 9, 80, 1, },
1678 { FDRIVE_DRV_120, 11, 80, 1, },
1679 /* 360 kB 5"1/4 floppy disks */
1680 { FDRIVE_DRV_120, 9, 40, 1, },
1681 { FDRIVE_DRV_120, 9, 40, 0, },
1682 { FDRIVE_DRV_120, 10, 41, 1, },
1683 { FDRIVE_DRV_120, 10, 42, 1, },
1684 /* 320 kB 5"1/4 floppy disks */
1685 { FDRIVE_DRV_120, 8, 40, 1, },
1686 { FDRIVE_DRV_120, 8, 40, 0, },
1687 /* 360 kB must match 5"1/4 better than 3"1/2... */
1688 { FDRIVE_DRV_144, 9, 80, 0, },
1689 /* end */
1690 { FDRIVE_DRV_NONE, -1, -1, 0, },
1691 };
1692
1693 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
1694 int *max_track, int *last_sect,
1695 FDriveType drive_in, FDriveType *drive)
1696 {
1697 const FDFormat *parse;
1698 uint64_t nb_sectors, size;
1699 int i, first_match, match;
1700
1701 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
1702 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
1703 /* User defined disk */
1704 } else {
1705 bdrv_get_geometry(bs, &nb_sectors);
1706 match = -1;
1707 first_match = -1;
1708 for (i = 0; ; i++) {
1709 parse = &fd_formats[i];
1710 if (parse->drive == FDRIVE_DRV_NONE) {
1711 break;
1712 }
1713 if (drive_in == parse->drive ||
1714 drive_in == FDRIVE_DRV_NONE) {
1715 size = (parse->max_head + 1) * parse->max_track *
1716 parse->last_sect;
1717 if (nb_sectors == size) {
1718 match = i;
1719 break;
1720 }
1721 if (first_match == -1) {
1722 first_match = i;
1723 }
1724 }
1725 }
1726 if (match == -1) {
1727 if (first_match == -1) {
1728 match = 1;
1729 } else {
1730 match = first_match;
1731 }
1732 parse = &fd_formats[match];
1733 }
1734 *nb_heads = parse->max_head + 1;
1735 *max_track = parse->max_track;
1736 *last_sect = parse->last_sect;
1737 *drive = parse->drive;
1738 }
1739 }
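
/*
 * Worked example for the table match above: a 2880-sector (1.44 MB) image
 * matches the first fd_formats entry exactly, since
 * (max_head + 1) * max_track * last_sect == 2 * 80 * 18 == 2880, so the
 * drive is reported as FDRIVE_DRV_144 with 2 heads, 80 tracks, 18 sectors.
 */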
1740
1741 int bdrv_get_translation_hint(BlockDriverState *bs)
1742 {
1743 return bs->translation;
1744 }
1745
1746 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
1747 BlockErrorAction on_write_error)
1748 {
1749 bs->on_read_error = on_read_error;
1750 bs->on_write_error = on_write_error;
1751 }
1752
1753 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
1754 {
1755 return is_read ? bs->on_read_error : bs->on_write_error;
1756 }
1757
1758 int bdrv_is_read_only(BlockDriverState *bs)
1759 {
1760 return bs->read_only;
1761 }
1762
1763 int bdrv_is_sg(BlockDriverState *bs)
1764 {
1765 return bs->sg;
1766 }
1767
1768 int bdrv_enable_write_cache(BlockDriverState *bs)
1769 {
1770 return bs->enable_write_cache;
1771 }
1772
1773 int bdrv_is_encrypted(BlockDriverState *bs)
1774 {
1775 if (bs->backing_hd && bs->backing_hd->encrypted)
1776 return 1;
1777 return bs->encrypted;
1778 }
1779
1780 int bdrv_key_required(BlockDriverState *bs)
1781 {
1782 BlockDriverState *backing_hd = bs->backing_hd;
1783
1784 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
1785 return 1;
1786 return (bs->encrypted && !bs->valid_key);
1787 }
1788
1789 int bdrv_set_key(BlockDriverState *bs, const char *key)
1790 {
1791 int ret;
1792 if (bs->backing_hd && bs->backing_hd->encrypted) {
1793 ret = bdrv_set_key(bs->backing_hd, key);
1794 if (ret < 0)
1795 return ret;
1796 if (!bs->encrypted)
1797 return 0;
1798 }
1799 if (!bs->encrypted) {
1800 return -EINVAL;
1801 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
1802 return -ENOMEDIUM;
1803 }
1804 ret = bs->drv->bdrv_set_key(bs, key);
1805 if (ret < 0) {
1806 bs->valid_key = 0;
1807 } else if (!bs->valid_key) {
1808 bs->valid_key = 1;
1809 /* call the change callback now, we skipped it on open */
1810 bdrv_dev_change_media_cb(bs, true);
1811 }
1812 return ret;
1813 }
1814
1815 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
1816 {
1817 if (!bs->drv) {
1818 buf[0] = '\0';
1819 } else {
1820 pstrcpy(buf, buf_size, bs->drv->format_name);
1821 }
1822 }
1823
1824 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
1825 void *opaque)
1826 {
1827 BlockDriver *drv;
1828
1829 QLIST_FOREACH(drv, &bdrv_drivers, list) {
1830 it(opaque, drv->format_name);
1831 }
1832 }
1833
1834 BlockDriverState *bdrv_find(const char *name)
1835 {
1836 BlockDriverState *bs;
1837
1838 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1839 if (!strcmp(name, bs->device_name)) {
1840 return bs;
1841 }
1842 }
1843 return NULL;
1844 }
1845
1846 BlockDriverState *bdrv_next(BlockDriverState *bs)
1847 {
1848 if (!bs) {
1849 return QTAILQ_FIRST(&bdrv_states);
1850 }
1851 return QTAILQ_NEXT(bs, list);
1852 }
1853
1854 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
1855 {
1856 BlockDriverState *bs;
1857
1858 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1859 it(opaque, bs);
1860 }
1861 }
1862
1863 const char *bdrv_get_device_name(BlockDriverState *bs)
1864 {
1865 return bs->device_name;
1866 }
1867
1868 void bdrv_flush_all(void)
1869 {
1870 BlockDriverState *bs;
1871
1872 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1873 if (!bdrv_is_read_only(bs) && bdrv_is_inserted(bs)) {
1874 bdrv_flush(bs);
1875 }
1876 }
1877 }
1878
1879 int bdrv_has_zero_init(BlockDriverState *bs)
1880 {
1881 assert(bs->drv);
1882
1883 if (bs->drv->bdrv_has_zero_init) {
1884 return bs->drv->bdrv_has_zero_init(bs);
1885 }
1886
1887 return 1;
1888 }
1889
1890 typedef struct BdrvCoIsAllocatedData {
1891 BlockDriverState *bs;
1892 int64_t sector_num;
1893 int nb_sectors;
1894 int *pnum;
1895 int ret;
1896 bool done;
1897 } BdrvCoIsAllocatedData;
1898
1899 /* Coroutine wrapper for bdrv_is_allocated() */
1900 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
1901 {
1902 BdrvCoIsAllocatedData *data = opaque;
1903 BlockDriverState *bs = data->bs;
1904
1905 data->ret = bs->drv->bdrv_co_is_allocated(bs, data->sector_num,
1906 data->nb_sectors, data->pnum);
1907 data->done = true;
1908 }
1909
1910 /*
1911 * Returns true iff the specified sector is present in the disk image. Drivers
1912 * not implementing the functionality are assumed to not support backing files,
1913 * hence all their sectors are reported as allocated.
1914 *
1915 * 'pnum' is set to the number of sectors (including and immediately following
1916 * the specified sector) that are known to be in the same
1917 * allocated/unallocated state.
1918 *
1919 * 'nb_sectors' is the max value 'pnum' should be set to.
1920 */
1921 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
1922 int *pnum)
1923 {
1924 int64_t n;
1925 if (bs->drv->bdrv_co_is_allocated) {
1926 Coroutine *co;
1927 BdrvCoIsAllocatedData data = {
1928 .bs = bs,
1929 .sector_num = sector_num,
1930 .nb_sectors = nb_sectors,
1931 .pnum = pnum,
1932 .done = false,
1933 };
1934
1935 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
1936 qemu_coroutine_enter(co, &data);
1937 while (!data.done) {
1938 qemu_aio_wait();
1939 }
1940 return data.ret;
1941 }
1942 if (!bs->drv->bdrv_is_allocated) {
1943 if (sector_num >= bs->total_sectors) {
1944 *pnum = 0;
1945 return 0;
1946 }
1947 n = bs->total_sectors - sector_num;
1948 *pnum = (n < nb_sectors) ? (n) : (nb_sectors);
1949 return 1;
1950 }
1951 return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
1952 }
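
/*
 * Illustrative sketch: walking an image's allocation map with
 * bdrv_is_allocated(), much like the copy loop in bdrv_commit() above
 * (the chunk size is an arbitrary example).
 */
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector = 0;
    int n;

    while (sector < total) {
        int chunk = MIN(total - sector, 65536);
        int allocated = bdrv_is_allocated(bs, sector, chunk, &n);

        printf("%" PRId64 "..%" PRId64 ": %s\n", sector, sector + n - 1,
               allocated ? "allocated" : "unallocated");
        sector += n;
    }
}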
1953
1954 void bdrv_mon_event(const BlockDriverState *bdrv,
1955 BlockMonEventAction action, int is_read)
1956 {
1957 QObject *data;
1958 const char *action_str;
1959
1960 switch (action) {
1961 case BDRV_ACTION_REPORT:
1962 action_str = "report";
1963 break;
1964 case BDRV_ACTION_IGNORE:
1965 action_str = "ignore";
1966 break;
1967 case BDRV_ACTION_STOP:
1968 action_str = "stop";
1969 break;
1970 default:
1971 abort();
1972 }
1973
1974 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1975 bdrv->device_name,
1976 action_str,
1977 is_read ? "read" : "write");
1978 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
1979
1980 qobject_decref(data);
1981 }
1982
1983 BlockInfoList *qmp_query_block(Error **errp)
1984 {
1985 BlockInfoList *head = NULL, *cur_item = NULL;
1986 BlockDriverState *bs;
1987
1988 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1989 BlockInfoList *info = g_malloc0(sizeof(*info));
1990
1991 info->value = g_malloc0(sizeof(*info->value));
1992 info->value->device = g_strdup(bs->device_name);
1993 info->value->type = g_strdup("unknown");
1994 info->value->locked = bdrv_dev_is_medium_locked(bs);
1995 info->value->removable = bdrv_dev_has_removable_media(bs);
1996
1997 if (bdrv_dev_has_removable_media(bs)) {
1998 info->value->has_tray_open = true;
1999 info->value->tray_open = bdrv_dev_is_tray_open(bs);
2000 }
2001
2002 if (bdrv_iostatus_is_enabled(bs)) {
2003 info->value->has_io_status = true;
2004 info->value->io_status = bs->iostatus;
2005 }
2006
2007 if (bs->drv) {
2008 info->value->has_inserted = true;
2009 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2010 info->value->inserted->file = g_strdup(bs->filename);
2011 info->value->inserted->ro = bs->read_only;
2012 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2013 info->value->inserted->encrypted = bs->encrypted;
2014 if (bs->backing_file[0]) {
2015 info->value->inserted->has_backing_file = true;
2016 info->value->inserted->backing_file = g_strdup(bs->backing_file);
2017 }
2018
2019 if (bs->io_limits_enabled) {
2020 info->value->inserted->bps =
2021 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2022 info->value->inserted->bps_rd =
2023 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2024 info->value->inserted->bps_wr =
2025 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2026 info->value->inserted->iops =
2027 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2028 info->value->inserted->iops_rd =
2029 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2030 info->value->inserted->iops_wr =
2031 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2032 }
2033 }
2034
2035 /* XXX: waiting for the qapi to support GSList */
2036 if (!cur_item) {
2037 head = cur_item = info;
2038 } else {
2039 cur_item->next = info;
2040 cur_item = info;
2041 }
2042 }
2043
2044 return head;
2045 }
2046
2047 /* Consider exposing this as a full-fledged QMP command */
2048 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
2049 {
2050 BlockStats *s;
2051
2052 s = g_malloc0(sizeof(*s));
2053
2054 if (bs->device_name[0]) {
2055 s->has_device = true;
2056 s->device = g_strdup(bs->device_name);
2057 }
2058
2059 s->stats = g_malloc0(sizeof(*s->stats));
2060 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2061 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2062 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2063 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2064 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2065 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2066 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2067 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2068 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2069
2070 if (bs->file) {
2071 s->has_parent = true;
2072 s->parent = qmp_query_blockstat(bs->file, NULL);
2073 }
2074
2075 return s;
2076 }
2077
2078 BlockStatsList *qmp_query_blockstats(Error **errp)
2079 {
2080 BlockStatsList *head = NULL, *cur_item = NULL;
2081 BlockDriverState *bs;
2082
2083 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2084 BlockStatsList *info = g_malloc0(sizeof(*info));
2085 info->value = qmp_query_blockstat(bs, NULL);
2086
2087 /* XXX: waiting for the qapi to support GSList */
2088 if (!cur_item) {
2089 head = cur_item = info;
2090 } else {
2091 cur_item->next = info;
2092 cur_item = info;
2093 }
2094 }
2095
2096 return head;
2097 }
2098
2099 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2100 {
2101 if (bs->backing_hd && bs->backing_hd->encrypted)
2102 return bs->backing_file;
2103 else if (bs->encrypted)
2104 return bs->filename;
2105 else
2106 return NULL;
2107 }
2108
2109 void bdrv_get_backing_filename(BlockDriverState *bs,
2110 char *filename, int filename_size)
2111 {
2112 pstrcpy(filename, filename_size, bs->backing_file);
2113 }
2114
2115 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
2116 const uint8_t *buf, int nb_sectors)
2117 {
2118 BlockDriver *drv = bs->drv;
2119 if (!drv)
2120 return -ENOMEDIUM;
2121 if (!drv->bdrv_write_compressed)
2122 return -ENOTSUP;
2123 if (bdrv_check_request(bs, sector_num, nb_sectors))
2124 return -EIO;
2125
2126 if (bs->dirty_bitmap) {
2127 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2128 }
2129
2130 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2131 }
2132
2133 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2134 {
2135 BlockDriver *drv = bs->drv;
2136 if (!drv)
2137 return -ENOMEDIUM;
2138 if (!drv->bdrv_get_info)
2139 return -ENOTSUP;
2140 memset(bdi, 0, sizeof(*bdi));
2141 return drv->bdrv_get_info(bs, bdi);
2142 }
2143
2144 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2145 int64_t pos, int size)
2146 {
2147 BlockDriver *drv = bs->drv;
2148 if (!drv)
2149 return -ENOMEDIUM;
2150 if (drv->bdrv_save_vmstate)
2151 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2152 if (bs->file)
2153 return bdrv_save_vmstate(bs->file, buf, pos, size);
2154 return -ENOTSUP;
2155 }
2156
2157 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2158 int64_t pos, int size)
2159 {
2160 BlockDriver *drv = bs->drv;
2161 if (!drv)
2162 return -ENOMEDIUM;
2163 if (drv->bdrv_load_vmstate)
2164 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2165 if (bs->file)
2166 return bdrv_load_vmstate(bs->file, buf, pos, size);
2167 return -ENOTSUP;
2168 }
2169
2170 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2171 {
2172 BlockDriver *drv = bs->drv;
2173
2174 if (!drv || !drv->bdrv_debug_event) {
2175 return;
2176 }
2177
2178 return drv->bdrv_debug_event(bs, event);
2179
2180 }
2181
2182 /**************************************************************/
2183 /* handling of snapshots */
2184
2185 int bdrv_can_snapshot(BlockDriverState *bs)
2186 {
2187 BlockDriver *drv = bs->drv;
2188 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
2189 return 0;
2190 }
2191
2192 if (!drv->bdrv_snapshot_create) {
2193 if (bs->file != NULL) {
2194 return bdrv_can_snapshot(bs->file);
2195 }
2196 return 0;
2197 }
2198
2199 return 1;
2200 }
2201
2202 int bdrv_is_snapshot(BlockDriverState *bs)
2203 {
2204 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2205 }
2206
2207 BlockDriverState *bdrv_snapshots(void)
2208 {
2209 BlockDriverState *bs;
2210
2211 if (bs_snapshots) {
2212 return bs_snapshots;
2213 }
2214
2215 bs = NULL;
2216 while ((bs = bdrv_next(bs))) {
2217 if (bdrv_can_snapshot(bs)) {
2218 bs_snapshots = bs;
2219 return bs;
2220 }
2221 }
2222 return NULL;
2223 }
2224
2225 int bdrv_snapshot_create(BlockDriverState *bs,
2226 QEMUSnapshotInfo *sn_info)
2227 {
2228 BlockDriver *drv = bs->drv;
2229 if (!drv)
2230 return -ENOMEDIUM;
2231 if (drv->bdrv_snapshot_create)
2232 return drv->bdrv_snapshot_create(bs, sn_info);
2233 if (bs->file)
2234 return bdrv_snapshot_create(bs->file, sn_info);
2235 return -ENOTSUP;
2236 }
2237
2238 int bdrv_snapshot_goto(BlockDriverState *bs,
2239 const char *snapshot_id)
2240 {
2241 BlockDriver *drv = bs->drv;
2242 int ret, open_ret;
2243
2244 if (!drv)
2245 return -ENOMEDIUM;
2246 if (drv->bdrv_snapshot_goto)
2247 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2248
2249 if (bs->file) {
2250 drv->bdrv_close(bs);
2251 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2252 open_ret = drv->bdrv_open(bs, bs->open_flags);
2253 if (open_ret < 0) {
2254 bdrv_delete(bs->file);
2255 bs->drv = NULL;
2256 return open_ret;
2257 }
2258 return ret;
2259 }
2260
2261 return -ENOTSUP;
2262 }
2263
2264 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2265 {
2266 BlockDriver *drv = bs->drv;
2267 if (!drv)
2268 return -ENOMEDIUM;
2269 if (drv->bdrv_snapshot_delete)
2270 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2271 if (bs->file)
2272 return bdrv_snapshot_delete(bs->file, snapshot_id);
2273 return -ENOTSUP;
2274 }
2275
2276 int bdrv_snapshot_list(BlockDriverState *bs,
2277 QEMUSnapshotInfo **psn_info)
2278 {
2279 BlockDriver *drv = bs->drv;
2280 if (!drv)
2281 return -ENOMEDIUM;
2282 if (drv->bdrv_snapshot_list)
2283 return drv->bdrv_snapshot_list(bs, psn_info);
2284 if (bs->file)
2285 return bdrv_snapshot_list(bs->file, psn_info);
2286 return -ENOTSUP;
2287 }
2288
2289 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2290 const char *snapshot_name)
2291 {
2292 BlockDriver *drv = bs->drv;
2293 if (!drv) {
2294 return -ENOMEDIUM;
2295 }
2296 if (!bs->read_only) {
2297 return -EINVAL;
2298 }
2299 if (drv->bdrv_snapshot_load_tmp) {
2300 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2301 }
2302 return -ENOTSUP;
2303 }
2304
2305 #define NB_SUFFIXES 4
2306
2307 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2308 {
2309 static const char suffixes[NB_SUFFIXES] = "KMGT";
2310 int64_t base;
2311 int i;
2312
2313 if (size <= 999) {
2314 snprintf(buf, buf_size, "%" PRId64, size);
2315 } else {
2316 base = 1024;
2317         for (i = 0; i < NB_SUFFIXES; i++) {
2318 if (size < (10 * base)) {
2319 snprintf(buf, buf_size, "%0.1f%c",
2320 (double)size / base,
2321 suffixes[i]);
2322 break;
2323 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
2324 snprintf(buf, buf_size, "%" PRId64 "%c",
2325 ((size + (base >> 1)) / base),
2326 suffixes[i]);
2327 break;
2328 }
2329 base = base * 1024;
2330 }
2331 }
2332 return buf;
2333 }
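
/* A few sample conversions, following the rounding rules above (the buffer
 * size is an arbitrary choice):
 *
 *     char buf[16];
 *     get_human_readable_size(buf, sizeof(buf), 999);         -> "999"
 *     get_human_readable_size(buf, sizeof(buf), 1536);        -> "1.5K"
 *     get_human_readable_size(buf, sizeof(buf), 500 * 1024);  -> "500K"
 *     get_human_readable_size(buf, sizeof(buf), 1048576);     -> "1.0M"
 */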
2334
2335 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2336 {
2337 char buf1[128], date_buf[128], clock_buf[128];
2338 #ifdef _WIN32
2339 struct tm *ptm;
2340 #else
2341 struct tm tm;
2342 #endif
2343 time_t ti;
2344 int64_t secs;
2345
2346 if (!sn) {
2347 snprintf(buf, buf_size,
2348 "%-10s%-20s%7s%20s%15s",
2349 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2350 } else {
2351 ti = sn->date_sec;
2352 #ifdef _WIN32
2353 ptm = localtime(&ti);
2354 strftime(date_buf, sizeof(date_buf),
2355 "%Y-%m-%d %H:%M:%S", ptm);
2356 #else
2357 localtime_r(&ti, &tm);
2358 strftime(date_buf, sizeof(date_buf),
2359 "%Y-%m-%d %H:%M:%S", &tm);
2360 #endif
2361 secs = sn->vm_clock_nsec / 1000000000;
2362 snprintf(clock_buf, sizeof(clock_buf),
2363 "%02d:%02d:%02d.%03d",
2364 (int)(secs / 3600),
2365 (int)((secs / 60) % 60),
2366 (int)(secs % 60),
2367 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2368 snprintf(buf, buf_size,
2369 "%-10s%-20s%7s%20s%15s",
2370 sn->id_str, sn->name,
2371 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2372 date_buf,
2373 clock_buf);
2374 }
2375 return buf;
2376 }
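
/* With sn == NULL the function produces the header row used by
 * "info snapshots"; with a snapshot it produces a matching data row,
 * e.g. (fabricated values, columns per the format string above):
 *
 *     ID        TAG                 VM SIZE                DATE       VM CLOCK
 *     1         pre-upgrade            1.5M 2012-01-01 12:00:00   00:00:42.123
 */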
2377
2378 /**************************************************************/
2379 /* async I/Os */
2380
2381 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2382 QEMUIOVector *qiov, int nb_sectors,
2383 BlockDriverCompletionFunc *cb, void *opaque)
2384 {
2385 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2386
2387 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2388 cb, opaque, false);
2389 }
2390
2391 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2392 QEMUIOVector *qiov, int nb_sectors,
2393 BlockDriverCompletionFunc *cb, void *opaque)
2394 {
2395 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2396
2397 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2398 cb, opaque, true);
2399 }
2400
2401
2402 typedef struct MultiwriteCB {
2403 int error;
2404 int num_requests;
2405 int num_callbacks;
2406 struct {
2407 BlockDriverCompletionFunc *cb;
2408 void *opaque;
2409 QEMUIOVector *free_qiov;
2410 void *free_buf;
2411 } callbacks[];
2412 } MultiwriteCB;
2413
2414 static void multiwrite_user_cb(MultiwriteCB *mcb)
2415 {
2416 int i;
2417
2418 for (i = 0; i < mcb->num_callbacks; i++) {
2419 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2420 if (mcb->callbacks[i].free_qiov) {
2421 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2422 }
2423 g_free(mcb->callbacks[i].free_qiov);
2424 qemu_vfree(mcb->callbacks[i].free_buf);
2425 }
2426 }
2427
2428 static void multiwrite_cb(void *opaque, int ret)
2429 {
2430 MultiwriteCB *mcb = opaque;
2431
2432 trace_multiwrite_cb(mcb, ret);
2433
2434 if (ret < 0 && !mcb->error) {
2435 mcb->error = ret;
2436 }
2437
2438 mcb->num_requests--;
2439 if (mcb->num_requests == 0) {
2440 multiwrite_user_cb(mcb);
2441 g_free(mcb);
2442 }
2443 }
2444
2445 static int multiwrite_req_compare(const void *a, const void *b)
2446 {
2447 const BlockRequest *req1 = a, *req2 = b;
2448
2449 /*
2450 * Note that we can't simply subtract req2->sector from req1->sector
2451 * here as that could overflow the return value.
2452 */
2453 if (req1->sector > req2->sector) {
2454 return 1;
2455 } else if (req1->sector < req2->sector) {
2456 return -1;
2457 } else {
2458 return 0;
2459 }
2460 }
2461
2462 /*
2463 * Takes a bunch of requests and tries to merge them. Returns the number of
2464 * requests that remain after merging.
2465 */
2466 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2467 int num_reqs, MultiwriteCB *mcb)
2468 {
2469 int i, outidx;
2470
2471 // Sort requests by start sector
2472 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2473
2474 // Check if adjacent requests touch the same clusters. If so, combine them,
2475 // filling up gaps with zero sectors.
2476 outidx = 0;
2477 for (i = 1; i < num_reqs; i++) {
2478 int merge = 0;
2479 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2480
2481 // This handles the cases that are valid for all block drivers, namely
2482 // exactly sequential writes and overlapping writes.
2483 if (reqs[i].sector <= oldreq_last) {
2484 merge = 1;
2485 }
2486
2487         // The block driver may decide that it makes sense to combine requests
2488         // even if there is a gap of some sectors between them. In this case,
2489         // the gap is filled with zeros (therefore only applicable to as-yet-
2490         // unused space in formats like qcow2).
2491 if (!merge && bs->drv->bdrv_merge_requests) {
2492 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2493 }
2494
2495 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2496 merge = 0;
2497 }
2498
2499 if (merge) {
2500 size_t size;
2501 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
2502 qemu_iovec_init(qiov,
2503 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2504
2505 // Add the first request to the merged one. If the requests are
2506 // overlapping, drop the last sectors of the first request.
2507 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2508 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2509
2510 // We might need to add some zeros between the two requests
2511 if (reqs[i].sector > oldreq_last) {
2512 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2513 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2514 memset(buf, 0, zero_bytes);
2515 qemu_iovec_add(qiov, buf, zero_bytes);
2516 mcb->callbacks[i].free_buf = buf;
2517 }
2518
2519 // Add the second request
2520 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2521
2522 reqs[outidx].nb_sectors = qiov->size >> 9;
2523 reqs[outidx].qiov = qiov;
2524
2525 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2526 } else {
2527 outidx++;
2528 reqs[outidx].sector = reqs[i].sector;
2529 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2530 reqs[outidx].qiov = reqs[i].qiov;
2531 }
2532 }
2533
2534 return outidx + 1;
2535 }
2536
2537 /*
2538 * Submit multiple AIO write requests at once.
2539 *
2540 * On success, the function returns 0 and all requests in the reqs array have
2541 * been submitted. On error, it returns -1 and the individual requests may or
2542 * may not have been submitted, i.e. the callback will be called for some of
2543 * the requests and not for others. The caller must check the error field of
2544 * each BlockRequest to know which callbacks to wait for (if error != 0, no
2545 * callback will be called for that request).
2546 *
2547 * The implementation may modify the contents of the reqs array, e.g. to merge
2548 * requests. However, the fields opaque and error are left unmodified as they
2549 * are used to signal failure for a single request to the caller.
2550 */
2551 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2552 {
2553 BlockDriverAIOCB *acb;
2554 MultiwriteCB *mcb;
2555 int i;
2556
2557 /* don't submit writes if we don't have a medium */
2558 if (bs->drv == NULL) {
2559 for (i = 0; i < num_reqs; i++) {
2560 reqs[i].error = -ENOMEDIUM;
2561 }
2562 return -1;
2563 }
2564
2565 if (num_reqs == 0) {
2566 return 0;
2567 }
2568
2569 // Create MultiwriteCB structure
2570 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2571 mcb->num_requests = 0;
2572 mcb->num_callbacks = num_reqs;
2573
2574 for (i = 0; i < num_reqs; i++) {
2575 mcb->callbacks[i].cb = reqs[i].cb;
2576 mcb->callbacks[i].opaque = reqs[i].opaque;
2577 }
2578
2579     // Check for mergeable requests
2580 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2581
2582 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2583
2584 /*
2585 * Run the aio requests. As soon as one request can't be submitted
2586 * successfully, fail all requests that are not yet submitted (we must
2587 * return failure for all requests anyway)
2588 *
2589 * num_requests cannot be set to the right value immediately: If
2590 * bdrv_aio_writev fails for some request, num_requests would be too high
2591 * and therefore multiwrite_cb() would never recognize the multiwrite
2592 * request as completed. We also cannot use the loop variable i to set it
2593 * when the first request fails because the callback may already have been
2594 * called for previously submitted requests. Thus, num_requests must be
2595 * incremented for each request that is submitted.
2596 *
2597 * The problem that callbacks may be called early also means that we need
2598 * to take care that num_requests doesn't become 0 before all requests are
2599      * submitted, or multiwrite_cb() would consider the multiwrite request
2600 * completed. A dummy request that is "completed" by a manual call to
2601 * multiwrite_cb() takes care of this.
2602 */
2603 mcb->num_requests = 1;
2604
2605 // Run the aio requests
2606 for (i = 0; i < num_reqs; i++) {
2607 mcb->num_requests++;
2608 acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2609 reqs[i].nb_sectors, multiwrite_cb, mcb);
2610
2611 if (acb == NULL) {
2612 // We can only fail the whole thing if no request has been
2613 // submitted yet. Otherwise we'll wait for the submitted AIOs to
2614 // complete and report the error in the callback.
2615 if (i == 0) {
2616 trace_bdrv_aio_multiwrite_earlyfail(mcb);
2617 goto fail;
2618 } else {
2619 trace_bdrv_aio_multiwrite_latefail(mcb, i);
2620 multiwrite_cb(mcb, -EIO);
2621 break;
2622 }
2623 }
2624 }
2625
2626 /* Complete the dummy request */
2627 multiwrite_cb(mcb, 0);
2628
2629 return 0;
2630
2631 fail:
2632 for (i = 0; i < mcb->num_callbacks; i++) {
2633 reqs[i].error = -EIO;
2634 }
2635 g_free(mcb);
2636 return -1;
2637 }
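
/* A hypothetical caller (field values made up for illustration; in practice
 * the main user of this interface is the virtio-blk device model):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = write_done, .opaque = req1 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov2,
 *           .cb = write_done, .opaque = req2 },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // inspect reqs[i].error to see which callbacks will still run
 *     }
 *
 * Because the two requests are exactly sequential, multiwrite_merge() would
 * combine them into a single 16-sector write before submission.
 */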
2638
2639 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2640 {
2641 acb->pool->cancel(acb);
2642 }
2643
2644 /* block I/O throttling */
2645 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
2646 bool is_write, double elapsed_time, uint64_t *wait)
2647 {
2648 uint64_t bps_limit = 0;
2649 double bytes_limit, bytes_base, bytes_res;
2650 double slice_time, wait_time;
2651
2652 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
2653 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2654 } else if (bs->io_limits.bps[is_write]) {
2655 bps_limit = bs->io_limits.bps[is_write];
2656 } else {
2657 if (wait) {
2658 *wait = 0;
2659 }
2660
2661 return false;
2662 }
2663
2664 slice_time = bs->slice_end - bs->slice_start;
2665 slice_time /= (NANOSECONDS_PER_SECOND);
2666 bytes_limit = bps_limit * slice_time;
2667 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
2668 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
2669 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
2670 }
2671
2672     /* bytes_base: the number of bytes already read/written in this slice,
2673      * obtained from the accounting statistics.
2674      * bytes_res: the number of bytes that still need to be read/written.
2675      * (bytes_base + bytes_res) / bps_limit: the total time needed to
2676      * complete reading/writing all the data.
2677      */
2678 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2679
2680 if (bytes_base + bytes_res <= bytes_limit) {
2681 if (wait) {
2682 *wait = 0;
2683 }
2684
2685 return false;
2686 }
2687
2688 /* Calc approx time to dispatch */
2689 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
2690
2691     /* When the I/O rate at runtime exceeds the limits,
2692      * bs->slice_end needs to be extended so that the current statistics
2693      * are kept until the timer fires; the size of the extension was
2694      * tuned experimentally.
2695      */
2696 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
2697 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
2698 if (wait) {
2699 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
2700 }
2701
2702 return true;
2703 }
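
/* Worked example for the bps check above (all numbers made up): with
 * bps[BLOCK_IO_LIMIT_TOTAL] = 1048576 (1 MB/s) and slice_time = 0.1 s,
 * bytes_limit is 104857.6.  If bytes_base is 90112 and a 64-sector write
 * arrives (bytes_res = 64 * 512 = 32768), then 90112 + 32768 = 122880
 * exceeds the limit, so the function reports a wait of roughly
 * 122880 / 1048576 - elapsed_time seconds (scaled into the *wait value
 * as shown above) instead of letting the request through.
 */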
2704
2705 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
2706 double elapsed_time, uint64_t *wait)
2707 {
2708 uint64_t iops_limit = 0;
2709 double ios_limit, ios_base;
2710 double slice_time, wait_time;
2711
2712 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
2713 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2714 } else if (bs->io_limits.iops[is_write]) {
2715 iops_limit = bs->io_limits.iops[is_write];
2716 } else {
2717 if (wait) {
2718 *wait = 0;
2719 }
2720
2721 return false;
2722 }
2723
2724 slice_time = bs->slice_end - bs->slice_start;
2725 slice_time /= (NANOSECONDS_PER_SECOND);
2726 ios_limit = iops_limit * slice_time;
2727 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
2728 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
2729 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
2730 }
2731
2732 if (ios_base + 1 <= ios_limit) {
2733 if (wait) {
2734 *wait = 0;
2735 }
2736
2737 return false;
2738 }
2739
2740 /* Calc approx time to dispatch */
2741 wait_time = (ios_base + 1) / iops_limit;
2742 if (wait_time > elapsed_time) {
2743 wait_time = wait_time - elapsed_time;
2744 } else {
2745 wait_time = 0;
2746 }
2747
2748 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
2749 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
2750 if (wait) {
2751 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
2752 }
2753
2754 return true;
2755 }
2756
2757 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
2758 bool is_write, int64_t *wait)
2759 {
2760 int64_t now, max_wait;
2761 uint64_t bps_wait = 0, iops_wait = 0;
2762 double elapsed_time;
2763 int bps_ret, iops_ret;
2764
2765 now = qemu_get_clock_ns(vm_clock);
2766 if ((bs->slice_start < now)
2767 && (bs->slice_end > now)) {
2768 bs->slice_end = now + bs->slice_time;
2769 } else {
2770 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
2771 bs->slice_start = now;
2772 bs->slice_end = now + bs->slice_time;
2773
2774 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
2775 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
2776
2777 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
2778 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
2779 }
2780
2781 elapsed_time = now - bs->slice_start;
2782 elapsed_time /= (NANOSECONDS_PER_SECOND);
2783
2784 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
2785 is_write, elapsed_time, &bps_wait);
2786 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
2787 elapsed_time, &iops_wait);
2788 if (bps_ret || iops_ret) {
2789 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
2790 if (wait) {
2791 *wait = max_wait;
2792 }
2793
2794 now = qemu_get_clock_ns(vm_clock);
2795 if (bs->slice_end < now + max_wait) {
2796 bs->slice_end = now + max_wait;
2797 }
2798
2799 return true;
2800 }
2801
2802 if (wait) {
2803 *wait = 0;
2804 }
2805
2806 return false;
2807 }
2808
2809 /**************************************************************/
2810 /* async block device emulation */
2811
2812 typedef struct BlockDriverAIOCBSync {
2813 BlockDriverAIOCB common;
2814 QEMUBH *bh;
2815 int ret;
2816 /* vector translation state */
2817 QEMUIOVector *qiov;
2818 uint8_t *bounce;
2819 int is_write;
2820 } BlockDriverAIOCBSync;
2821
2822 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
2823 {
2824 BlockDriverAIOCBSync *acb =
2825 container_of(blockacb, BlockDriverAIOCBSync, common);
2826 qemu_bh_delete(acb->bh);
2827 acb->bh = NULL;
2828 qemu_aio_release(acb);
2829 }
2830
2831 static AIOPool bdrv_em_aio_pool = {
2832 .aiocb_size = sizeof(BlockDriverAIOCBSync),
2833 .cancel = bdrv_aio_cancel_em,
2834 };
2835
2836 static void bdrv_aio_bh_cb(void *opaque)
2837 {
2838 BlockDriverAIOCBSync *acb = opaque;
2839
2840 if (!acb->is_write)
2841 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
2842 qemu_vfree(acb->bounce);
2843 acb->common.cb(acb->common.opaque, acb->ret);
2844 qemu_bh_delete(acb->bh);
2845 acb->bh = NULL;
2846 qemu_aio_release(acb);
2847 }
2848
2849 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2850 int64_t sector_num,
2851 QEMUIOVector *qiov,
2852 int nb_sectors,
2853 BlockDriverCompletionFunc *cb,
2854 void *opaque,
2855 int is_write)
2856
2857 {
2858 BlockDriverAIOCBSync *acb;
2859
2860 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2861 acb->is_write = is_write;
2862 acb->qiov = qiov;
2863 acb->bounce = qemu_blockalign(bs, qiov->size);
2864
2865 if (!acb->bh)
2866 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2867
2868 if (is_write) {
2869 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
2870 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2871 } else {
2872 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2873 }
2874
2875 qemu_bh_schedule(acb->bh);
2876
2877 return &acb->common;
2878 }
2879
2880 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2881 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2882 BlockDriverCompletionFunc *cb, void *opaque)
2883 {
2884 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2885 }
2886
2887 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2888 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2889 BlockDriverCompletionFunc *cb, void *opaque)
2890 {
2891 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2892 }
2893
2894
2895 typedef struct BlockDriverAIOCBCoroutine {
2896 BlockDriverAIOCB common;
2897 BlockRequest req;
2898 bool is_write;
2899 QEMUBH* bh;
2900 } BlockDriverAIOCBCoroutine;
2901
2902 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
2903 {
2904 qemu_aio_flush();
2905 }
2906
2907 static AIOPool bdrv_em_co_aio_pool = {
2908 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
2909 .cancel = bdrv_aio_co_cancel_em,
2910 };
2911
2912 static void bdrv_co_em_bh(void *opaque)
2913 {
2914 BlockDriverAIOCBCoroutine *acb = opaque;
2915
2916 acb->common.cb(acb->common.opaque, acb->req.error);
2917 qemu_bh_delete(acb->bh);
2918 qemu_aio_release(acb);
2919 }
2920
2921 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2922 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2923 {
2924 BlockDriverAIOCBCoroutine *acb = opaque;
2925 BlockDriverState *bs = acb->common.bs;
2926
2927 if (!acb->is_write) {
2928 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
2929 acb->req.nb_sectors, acb->req.qiov);
2930 } else {
2931 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
2932 acb->req.nb_sectors, acb->req.qiov);
2933 }
2934
2935 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
2936 qemu_bh_schedule(acb->bh);
2937 }
2938
2939 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
2940 int64_t sector_num,
2941 QEMUIOVector *qiov,
2942 int nb_sectors,
2943 BlockDriverCompletionFunc *cb,
2944 void *opaque,
2945 bool is_write)
2946 {
2947 Coroutine *co;
2948 BlockDriverAIOCBCoroutine *acb;
2949
2950 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
2951 acb->req.sector = sector_num;
2952 acb->req.nb_sectors = nb_sectors;
2953 acb->req.qiov = qiov;
2954 acb->is_write = is_write;
2955
2956 co = qemu_coroutine_create(bdrv_co_do_rw);
2957 qemu_coroutine_enter(co, acb);
2958
2959 return &acb->common;
2960 }
2961
2962 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2963 {
2964 BlockDriverAIOCBCoroutine *acb = opaque;
2965 BlockDriverState *bs = acb->common.bs;
2966
2967 acb->req.error = bdrv_co_flush(bs);
2968 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
2969 qemu_bh_schedule(acb->bh);
2970 }
2971
2972 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2973 BlockDriverCompletionFunc *cb, void *opaque)
2974 {
2975 trace_bdrv_aio_flush(bs, opaque);
2976
2977 Coroutine *co;
2978 BlockDriverAIOCBCoroutine *acb;
2979
2980 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
2981 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2982 qemu_coroutine_enter(co, acb);
2983
2984 return &acb->common;
2985 }
2986
2987 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2988 {
2989 BlockDriverAIOCBCoroutine *acb = opaque;
2990 BlockDriverState *bs = acb->common.bs;
2991
2992 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2993 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
2994 qemu_bh_schedule(acb->bh);
2995 }
2996
2997 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2998 int64_t sector_num, int nb_sectors,
2999 BlockDriverCompletionFunc *cb, void *opaque)
3000 {
3001 Coroutine *co;
3002 BlockDriverAIOCBCoroutine *acb;
3003
3004 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3005
3006 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3007 acb->req.sector = sector_num;
3008 acb->req.nb_sectors = nb_sectors;
3009 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3010 qemu_coroutine_enter(co, acb);
3011
3012 return &acb->common;
3013 }
3014
3015 void bdrv_init(void)
3016 {
3017 module_call_init(MODULE_INIT_BLOCK);
3018 }
3019
3020 void bdrv_init_with_whitelist(void)
3021 {
3022 use_bdrv_whitelist = 1;
3023 bdrv_init();
3024 }
3025
3026 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3027 BlockDriverCompletionFunc *cb, void *opaque)
3028 {
3029 BlockDriverAIOCB *acb;
3030
3031 if (pool->free_aiocb) {
3032 acb = pool->free_aiocb;
3033 pool->free_aiocb = acb->next;
3034 } else {
3035 acb = g_malloc0(pool->aiocb_size);
3036 acb->pool = pool;
3037 }
3038 acb->bs = bs;
3039 acb->cb = cb;
3040 acb->opaque = opaque;
3041 return acb;
3042 }
3043
3044 void qemu_aio_release(void *p)
3045 {
3046 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3047 AIOPool *pool = acb->pool;
3048 acb->next = pool->free_aiocb;
3049 pool->free_aiocb = acb;
3050 }
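
/* Note that the pool is a simple intrusive free list: a released AIOCB is
 * pushed onto pool->free_aiocb and handed out again by the next
 * qemu_aio_get().  Only freshly allocated AIOCBs are zeroed (g_malloc0),
 * so users must reset any state they rely on when reusing one; e.g.
 * bdrv_aio_bh_cb() above clears acb->bh before releasing the AIOCB.
 */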
3051
3052 /**************************************************************/
3053 /* Coroutine block device emulation */
3054
3055 typedef struct CoroutineIOCompletion {
3056 Coroutine *coroutine;
3057 int ret;
3058 } CoroutineIOCompletion;
3059
3060 static void bdrv_co_io_em_complete(void *opaque, int ret)
3061 {
3062 CoroutineIOCompletion *co = opaque;
3063
3064 co->ret = ret;
3065 qemu_coroutine_enter(co->coroutine, NULL);
3066 }
3067
3068 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3069 int nb_sectors, QEMUIOVector *iov,
3070 bool is_write)
3071 {
3072 CoroutineIOCompletion co = {
3073 .coroutine = qemu_coroutine_self(),
3074 };
3075 BlockDriverAIOCB *acb;
3076
3077 if (is_write) {
3078 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3079 bdrv_co_io_em_complete, &co);
3080 } else {
3081 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3082 bdrv_co_io_em_complete, &co);
3083 }
3084
3085 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
3086 if (!acb) {
3087 return -EIO;
3088 }
3089 qemu_coroutine_yield();
3090
3091 return co.ret;
3092 }
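
/* This is the standard pattern for wrapping callback-based AIO in a
 * coroutine: submit with bdrv_co_io_em_complete() as the completion
 * callback, yield, and let the callback re-enter the coroutine with the
 * result in co.ret.  Roughly:
 *
 *     coroutine                          event loop
 *     ---------                          ----------
 *     drv->bdrv_aio_readv(..., &co)
 *     qemu_coroutine_yield()      ---->  (I/O in flight)
 *                                        bdrv_co_io_em_complete(&co, ret)
 *     resumes, returns co.ret     <----  qemu_coroutine_enter(...)
 */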
3093
3094 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3095 int64_t sector_num, int nb_sectors,
3096 QEMUIOVector *iov)
3097 {
3098 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3099 }
3100
3101 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3102 int64_t sector_num, int nb_sectors,
3103 QEMUIOVector *iov)
3104 {
3105 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3106 }
3107
3108 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
3109 {
3110 RwCo *rwco = opaque;
3111
3112 rwco->ret = bdrv_co_flush(rwco->bs);
3113 }
3114
3115 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3116 {
3117 int ret;
3118
3119 if (!bs->drv) {
3120 return 0;
3121 }
3122
3123 /* Write back cached data to the OS even with cache=unsafe */
3124 if (bs->drv->bdrv_co_flush_to_os) {
3125 ret = bs->drv->bdrv_co_flush_to_os(bs);
3126 if (ret < 0) {
3127 return ret;
3128 }
3129 }
3130
3131 /* But don't actually force it to the disk with cache=unsafe */
3132 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3133 return 0;
3134 }
3135
3136 if (bs->drv->bdrv_co_flush_to_disk) {
3137 return bs->drv->bdrv_co_flush_to_disk(bs);
3138 } else if (bs->drv->bdrv_aio_flush) {
3139 BlockDriverAIOCB *acb;
3140 CoroutineIOCompletion co = {
3141 .coroutine = qemu_coroutine_self(),
3142 };
3143
3144 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3145 if (acb == NULL) {
3146 return -EIO;
3147 } else {
3148 qemu_coroutine_yield();
3149 return co.ret;
3150 }
3151 } else {
3152         /*
3153          * Some block drivers always operate in either writethrough or unsafe
3154          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3155          * know how the server works (because the behaviour is hardcoded or
3156          * depends on server-side configuration), so we can't ensure that
3157          * everything is safe on disk. Returning an error doesn't work because
3158          * that would break guests even if the server operates in writethrough
3159          * mode.
3160          *
3161          * Let's hope the user knows what they're doing.
3162          */
3163 return 0;
3164 }
3165 }
3166
3167 void bdrv_invalidate_cache(BlockDriverState *bs)
3168 {
3169 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3170 bs->drv->bdrv_invalidate_cache(bs);
3171 }
3172 }
3173
3174 void bdrv_invalidate_cache_all(void)
3175 {
3176 BlockDriverState *bs;
3177
3178 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3179 bdrv_invalidate_cache(bs);
3180 }
3181 }
3182
3183 int bdrv_flush(BlockDriverState *bs)
3184 {
3185 Coroutine *co;
3186 RwCo rwco = {
3187 .bs = bs,
3188 .ret = NOT_DONE,
3189 };
3190
3191 if (qemu_in_coroutine()) {
3192 /* Fast-path if already in coroutine context */
3193 bdrv_flush_co_entry(&rwco);
3194 } else {
3195 co = qemu_coroutine_create(bdrv_flush_co_entry);
3196 qemu_coroutine_enter(co, &rwco);
3197 while (rwco.ret == NOT_DONE) {
3198 qemu_aio_wait();
3199 }
3200 }
3201
3202 return rwco.ret;
3203 }
3204
3205 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3206 {
3207 RwCo *rwco = opaque;
3208
3209 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3210 }
3211
3212 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3213 int nb_sectors)
3214 {
3215 if (!bs->drv) {
3216 return -ENOMEDIUM;
3217 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3218 return -EIO;
3219 } else if (bs->read_only) {
3220 return -EROFS;
3221 } else if (bs->drv->bdrv_co_discard) {
3222 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3223 } else if (bs->drv->bdrv_aio_discard) {
3224 BlockDriverAIOCB *acb;
3225 CoroutineIOCompletion co = {
3226 .coroutine = qemu_coroutine_self(),
3227 };
3228
3229 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3230 bdrv_co_io_em_complete, &co);
3231 if (acb == NULL) {
3232 return -EIO;
3233 } else {
3234 qemu_coroutine_yield();
3235 return co.ret;
3236 }
3237 } else {
3238 return 0;
3239 }
3240 }
3241
3242 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3243 {
3244 Coroutine *co;
3245 RwCo rwco = {
3246 .bs = bs,
3247 .sector_num = sector_num,
3248 .nb_sectors = nb_sectors,
3249 .ret = NOT_DONE,
3250 };
3251
3252 if (qemu_in_coroutine()) {
3253 /* Fast-path if already in coroutine context */
3254 bdrv_discard_co_entry(&rwco);
3255 } else {
3256 co = qemu_coroutine_create(bdrv_discard_co_entry);
3257 qemu_coroutine_enter(co, &rwco);
3258 while (rwco.ret == NOT_DONE) {
3259 qemu_aio_wait();
3260 }
3261 }
3262
3263 return rwco.ret;
3264 }
3265
3266 /**************************************************************/
3267 /* removable device support */
3268
3269 /**
3270 * Return TRUE if the media is present
3271 */
3272 int bdrv_is_inserted(BlockDriverState *bs)
3273 {
3274 BlockDriver *drv = bs->drv;
3275
3276 if (!drv)
3277 return 0;
3278 if (!drv->bdrv_is_inserted)
3279 return 1;
3280 return drv->bdrv_is_inserted(bs);
3281 }
3282
3283 /**
3284 * Return whether the media changed since the last call to this
3285 * function, or -ENOTSUP if we don't know. Most drivers don't know.
3286 */
3287 int bdrv_media_changed(BlockDriverState *bs)
3288 {
3289 BlockDriver *drv = bs->drv;
3290
3291 if (drv && drv->bdrv_media_changed) {
3292 return drv->bdrv_media_changed(bs);
3293 }
3294 return -ENOTSUP;
3295 }
3296
3297 /**
3298 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3299 */
3300 void bdrv_eject(BlockDriverState *bs, int eject_flag)
3301 {
3302 BlockDriver *drv = bs->drv;
3303
3304 if (drv && drv->bdrv_eject) {
3305 drv->bdrv_eject(bs, eject_flag);
3306 }
3307 }
3308
3309 /**
3310 * Lock or unlock the media (if it is locked, the user won't be able
3311 * to eject it manually).
3312 */
3313 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
3314 {
3315 BlockDriver *drv = bs->drv;
3316
3317 trace_bdrv_lock_medium(bs, locked);
3318
3319 if (drv && drv->bdrv_lock_medium) {
3320 drv->bdrv_lock_medium(bs, locked);
3321 }
3322 }
3323
3324 /* needed for generic scsi interface */
3325
3326 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3327 {
3328 BlockDriver *drv = bs->drv;
3329
3330 if (drv && drv->bdrv_ioctl)
3331 return drv->bdrv_ioctl(bs, req, buf);
3332 return -ENOTSUP;
3333 }
3334
3335 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3336 unsigned long int req, void *buf,
3337 BlockDriverCompletionFunc *cb, void *opaque)
3338 {
3339 BlockDriver *drv = bs->drv;
3340
3341 if (drv && drv->bdrv_aio_ioctl)
3342 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3343 return NULL;
3344 }
3345
3346 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3347 {
3348 bs->buffer_alignment = align;
3349 }
3350
3351 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3352 {
3353 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
3354 }
3355
3356 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3357 {
3358 int64_t bitmap_size;
3359
3360 bs->dirty_count = 0;
3361 if (enable) {
3362 if (!bs->dirty_bitmap) {
3363 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
3364 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
3365 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
3366
3367 bs->dirty_bitmap = g_malloc0(bitmap_size);
3368 }
3369 } else {
3370 if (bs->dirty_bitmap) {
3371 g_free(bs->dirty_bitmap);
3372 bs->dirty_bitmap = NULL;
3373 }
3374 }
3375 }
3376
3377 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
3378 {
3379 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
3380
3381 if (bs->dirty_bitmap &&
3382 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
3383 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
3384 (1UL << (chunk % (sizeof(unsigned long) * 8))));
3385 } else {
3386 return 0;
3387 }
3388 }
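
/* Worked example for the indexing above, assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 and a 64-bit unsigned long (both
 * are build/configuration dependent): sector 1000000 belongs to chunk
 * 1000000 / 2048 = 488, which is tracked in bitmap word 488 / 64 = 7,
 * bit 488 % 64 = 40.
 */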
3389
3390 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
3391 int nr_sectors)
3392 {
3393 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
3394 }
3395
3396 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
3397 {
3398 return bs->dirty_count;
3399 }
3400
3401 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
3402 {
3403 assert(bs->in_use != in_use);
3404 bs->in_use = in_use;
3405 }
3406
3407 int bdrv_in_use(BlockDriverState *bs)
3408 {
3409 return bs->in_use;
3410 }
3411
3412 void bdrv_iostatus_enable(BlockDriverState *bs)
3413 {
3414 bs->iostatus_enabled = true;
3415 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3416 }
3417
3418 /* The I/O status is only enabled if the drive explicitly
3419 * enables it _and_ the VM is configured to stop on errors */
3420 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
3421 {
3422 return (bs->iostatus_enabled &&
3423 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
3424 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
3425 bs->on_read_error == BLOCK_ERR_STOP_ANY));
3426 }
3427
3428 void bdrv_iostatus_disable(BlockDriverState *bs)
3429 {
3430 bs->iostatus_enabled = false;
3431 }
3432
3433 void bdrv_iostatus_reset(BlockDriverState *bs)
3434 {
3435 if (bdrv_iostatus_is_enabled(bs)) {
3436 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3437 }
3438 }
3439
3440 /* XXX: Today this is set by device models because it makes the implementation
3441 quite simple. However, the block layer knows about the error, so it's
3442 possible to implement this without device models being involved */
3443 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
3444 {
3445 if (bdrv_iostatus_is_enabled(bs) &&
3446 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
3447 assert(error >= 0);
3448 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
3449 BLOCK_DEVICE_IO_STATUS_FAILED;
3450 }
3451 }
3452
3453 void
3454 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
3455 enum BlockAcctType type)
3456 {
3457 assert(type < BDRV_MAX_IOTYPE);
3458
3459 cookie->bytes = bytes;
3460 cookie->start_time_ns = get_clock();
3461 cookie->type = type;
3462 }
3463
3464 void
3465 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
3466 {
3467 assert(cookie->type < BDRV_MAX_IOTYPE);
3468
3469 bs->nr_bytes[cookie->type] += cookie->bytes;
3470 bs->nr_ops[cookie->type]++;
3471 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
3472 }
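
/* Typical usage from a device model (sketch; in practice the cookie is
 * usually embedded in the device's request structure):
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     // ... issue the read; once it completes:
 *     bdrv_acct_done(bs, &cookie);
 */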
3473
3474 int bdrv_img_create(const char *filename, const char *fmt,
3475 const char *base_filename, const char *base_fmt,
3476 char *options, uint64_t img_size, int flags)
3477 {
3478 QEMUOptionParameter *param = NULL, *create_options = NULL;
3479 QEMUOptionParameter *backing_fmt, *backing_file, *size;
3480 BlockDriverState *bs = NULL;
3481 BlockDriver *drv, *proto_drv;
3482 BlockDriver *backing_drv = NULL;
3483 int ret = 0;
3484
3485 /* Find driver and parse its options */
3486 drv = bdrv_find_format(fmt);
3487 if (!drv) {
3488 error_report("Unknown file format '%s'", fmt);
3489 ret = -EINVAL;
3490 goto out;
3491 }
3492
3493 proto_drv = bdrv_find_protocol(filename);
3494 if (!proto_drv) {
3495 error_report("Unknown protocol '%s'", filename);
3496 ret = -EINVAL;
3497 goto out;
3498 }
3499
3500 create_options = append_option_parameters(create_options,
3501 drv->create_options);
3502 create_options = append_option_parameters(create_options,
3503 proto_drv->create_options);
3504
3505 /* Create parameter list with default values */
3506 param = parse_option_parameters("", create_options, param);
3507
3508 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
3509
3510 /* Parse -o options */
3511 if (options) {
3512 param = parse_option_parameters(options, create_options, param);
3513 if (param == NULL) {
3514 error_report("Invalid options for file format '%s'.", fmt);
3515 ret = -EINVAL;
3516 goto out;
3517 }
3518 }
3519
3520 if (base_filename) {
3521 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
3522 base_filename)) {
3523 error_report("Backing file not supported for file format '%s'",
3524 fmt);
3525 ret = -EINVAL;
3526 goto out;
3527 }
3528 }
3529
3530 if (base_fmt) {
3531 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
3532 error_report("Backing file format not supported for file "
3533 "format '%s'", fmt);
3534 ret = -EINVAL;
3535 goto out;
3536 }
3537 }
3538
3539 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
3540 if (backing_file && backing_file->value.s) {
3541 if (!strcmp(filename, backing_file->value.s)) {
3542 error_report("Error: Trying to create an image with the "
3543 "same filename as the backing file");
3544 ret = -EINVAL;
3545 goto out;
3546 }
3547 }
3548
3549 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
3550 if (backing_fmt && backing_fmt->value.s) {
3551 backing_drv = bdrv_find_format(backing_fmt->value.s);
3552 if (!backing_drv) {
3553 error_report("Unknown backing file format '%s'",
3554 backing_fmt->value.s);
3555 ret = -EINVAL;
3556 goto out;
3557 }
3558 }
3559
3560     // The size for the image must always be specified, with one exception:
3561     // if we are using a backing file, we can obtain the size from there.
3562 size = get_option_parameter(param, BLOCK_OPT_SIZE);
3563 if (size && size->value.n == -1) {
3564 if (backing_file && backing_file->value.s) {
3565 uint64_t size;
3566 char buf[32];
3567
3568 bs = bdrv_new("");
3569
3570 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);
3571 if (ret < 0) {
3572 error_report("Could not open '%s'", backing_file->value.s);
3573 goto out;
3574 }
3575 bdrv_get_geometry(bs, &size);
3576 size *= 512;
3577
3578 snprintf(buf, sizeof(buf), "%" PRId64, size);
3579 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
3580 } else {
3581 error_report("Image creation needs a size parameter");
3582 ret = -EINVAL;
3583 goto out;
3584 }
3585 }
3586
3587 printf("Formatting '%s', fmt=%s ", filename, fmt);
3588 print_option_parameters(param);
3589 puts("");
3590
3591 ret = bdrv_create(drv, filename, param);
3592
3593 if (ret < 0) {
3594 if (ret == -ENOTSUP) {
3595 error_report("Formatting or formatting option not supported for "
3596 "file format '%s'", fmt);
3597 } else if (ret == -EFBIG) {
3598 error_report("The image size is too large for file format '%s'",
3599 fmt);
3600 } else {
3601 error_report("%s: error while creating %s: %s", filename, fmt,
3602 strerror(-ret));
3603 }
3604 }
3605
3606 out:
3607 free_option_parameters(create_options);
3608 free_option_parameters(param);
3609
3610 if (bs) {
3611 bdrv_delete(bs);
3612 }
3613
3614 return ret;
3615 }
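
/* Example invocation (hypothetical values), roughly what "qemu-img create
 * -f qcow2 test.qcow2 10G" boils down to:
 *
 *     ret = bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
 *                           (uint64_t)10 * 1024 * 1024 * 1024, 0);
 *
 * i.e. a 10 GiB qcow2 image with no backing file and default creation
 * options.
 */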