block: Introduce path_has_protocol() function
[qemu.git] / block.c
1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor.h"
28 #include "block_int.h"
29 #include "module.h"
30 #include "qemu-objects.h"
31
32 #ifdef CONFIG_BSD
33 #include <sys/types.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <sys/queue.h>
37 #ifndef __DragonFly__
38 #include <sys/disk.h>
39 #endif
40 #endif
41
42 #ifdef _WIN32
43 #include <windows.h>
44 #endif
45
46 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
47 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
48 BlockDriverCompletionFunc *cb, void *opaque);
49 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
50 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
51 BlockDriverCompletionFunc *cb, void *opaque);
52 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
53 BlockDriverCompletionFunc *cb, void *opaque);
54 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
55 BlockDriverCompletionFunc *cb, void *opaque);
56 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
57 uint8_t *buf, int nb_sectors);
58 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
59 const uint8_t *buf, int nb_sectors);
60
61 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
62 QTAILQ_HEAD_INITIALIZER(bdrv_states);
63
64 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
65 QLIST_HEAD_INITIALIZER(bdrv_drivers);
66
67 /* The device to use for VM snapshots */
68 static BlockDriverState *bs_snapshots;
69
70 /* If non-zero, use only whitelisted block drivers */
71 static int use_bdrv_whitelist;
72
73 #ifdef _WIN32
74 static int is_windows_drive_prefix(const char *filename)
75 {
76 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
77 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
78 filename[1] == ':');
79 }
80
81 int is_windows_drive(const char *filename)
82 {
83 if (is_windows_drive_prefix(filename) &&
84 filename[2] == '\0')
85 return 1;
86 if (strstart(filename, "\\\\.\\", NULL) ||
87 strstart(filename, "//./", NULL))
88 return 1;
89 return 0;
90 }
91 #endif
92
93 /* check if the path starts with "<protocol>:" */
94 static int path_has_protocol(const char *path)
95 {
96 #ifdef _WIN32
97 if (is_windows_drive(path) ||
98 is_windows_drive_prefix(path)) {
99 return 0;
100 }
101 #endif
102
103 return strchr(path, ':') != NULL;
104 }
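/*
 * Editorial illustration (not part of the original file): on POSIX hosts
 * any filename containing a colon is treated as carrying a protocol
 * prefix, while on Windows bare drive letters are excluded first.
 * Expected results for some hypothetical inputs:
 *
 *     path_has_protocol("nbd:localhost:10809");    // non-zero
 *     path_has_protocol("/var/lib/images/a.img");  // 0, no colon
 *     path_has_protocol("c:\\images\\a.img");      // 0 on _WIN32 (drive prefix)
 */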
105
106 int path_is_absolute(const char *path)
107 {
108 const char *p;
109 #ifdef _WIN32
110 /* specific case for names like: "\\.\d:" */
111 if (*path == '/' || *path == '\\')
112 return 1;
113 #endif
114 p = strchr(path, ':');
115 if (p)
116 p++;
117 else
118 p = path;
119 #ifdef _WIN32
120 return (*p == '/' || *p == '\\');
121 #else
122 return (*p == '/');
123 #endif
124 }
125
126 /* if filename is absolute, just copy it to dest. Otherwise, build a
127    path to it, treating it as relative to base_path. URLs are
128    supported. */
129 void path_combine(char *dest, int dest_size,
130 const char *base_path,
131 const char *filename)
132 {
133 const char *p, *p1;
134 int len;
135
136 if (dest_size <= 0)
137 return;
138 if (path_is_absolute(filename)) {
139 pstrcpy(dest, dest_size, filename);
140 } else {
141 p = strchr(base_path, ':');
142 if (p)
143 p++;
144 else
145 p = base_path;
146 p1 = strrchr(base_path, '/');
147 #ifdef _WIN32
148 {
149 const char *p2;
150 p2 = strrchr(base_path, '\\');
151 if (!p1 || p2 > p1)
152 p1 = p2;
153 }
154 #endif
155 if (p1)
156 p1++;
157 else
158 p1 = base_path;
159 if (p1 > p)
160 p = p1;
161 len = p - base_path;
162 if (len > dest_size - 1)
163 len = dest_size - 1;
164 memcpy(dest, base_path, len);
165 dest[len] = '\0';
166 pstrcat(dest, dest_size, filename);
167 }
168 }
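/*
 * Editorial sketch (not in the original source): path_combine() is what
 * resolves a relative backing file name against the image that refers to
 * it.  With hypothetical inputs:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest),
 *                  "/images/vm/disk.qcow2", "base.qcow2");
 *     // dest == "/images/vm/base.qcow2"
 *
 *     path_combine(dest, sizeof(dest),
 *                  "http://host/dir/disk.qcow2", "base.qcow2");
 *     // dest == "http://host/dir/base.qcow2" (the URL prefix is kept)
 */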
169
170 void bdrv_register(BlockDriver *bdrv)
171 {
172 if (!bdrv->bdrv_aio_readv) {
173 /* add AIO emulation layer */
174 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
175 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
176 } else if (!bdrv->bdrv_read) {
177 /* add synchronous IO emulation layer */
178 bdrv->bdrv_read = bdrv_read_em;
179 bdrv->bdrv_write = bdrv_write_em;
180 }
181
182 if (!bdrv->bdrv_aio_flush)
183 bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
184
185 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
186 }
187
188 /* create a new block device (by default it is empty) */
189 BlockDriverState *bdrv_new(const char *device_name)
190 {
191 BlockDriverState *bs;
192
193 bs = qemu_mallocz(sizeof(BlockDriverState));
194 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
195 if (device_name[0] != '\0') {
196 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
197 }
198 return bs;
199 }
200
201 BlockDriver *bdrv_find_format(const char *format_name)
202 {
203 BlockDriver *drv1;
204 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
205 if (!strcmp(drv1->format_name, format_name)) {
206 return drv1;
207 }
208 }
209 return NULL;
210 }
211
212 static int bdrv_is_whitelisted(BlockDriver *drv)
213 {
214 static const char *whitelist[] = {
215 CONFIG_BDRV_WHITELIST
216 };
217 const char **p;
218
219 if (!whitelist[0])
220 return 1; /* no whitelist, anything goes */
221
222 for (p = whitelist; *p; p++) {
223 if (!strcmp(drv->format_name, *p)) {
224 return 1;
225 }
226 }
227 return 0;
228 }
229
230 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
231 {
232 BlockDriver *drv = bdrv_find_format(format_name);
233 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
234 }
235
236 int bdrv_create(BlockDriver *drv, const char* filename,
237 QEMUOptionParameter *options)
238 {
239 if (!drv->bdrv_create)
240 return -ENOTSUP;
241
242 return drv->bdrv_create(filename, options);
243 }
244
245 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
246 {
247 BlockDriver *drv;
248
249 drv = bdrv_find_protocol(filename);
250 if (drv == NULL) {
251 return -ENOENT;
252 }
253
254 return bdrv_create(drv, filename, options);
255 }
256
257 #ifdef _WIN32
258 void get_tmp_filename(char *filename, int size)
259 {
260 char temp_dir[MAX_PATH];
261
262 GetTempPath(MAX_PATH, temp_dir);
263 GetTempFileName(temp_dir, "qem", 0, filename);
264 }
265 #else
266 void get_tmp_filename(char *filename, int size)
267 {
268 int fd;
269 const char *tmpdir;
270 /* XXX: race condition possible */
271 tmpdir = getenv("TMPDIR");
272 if (!tmpdir)
273 tmpdir = "/tmp";
274 snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
275 fd = mkstemp(filename);
276 close(fd);
277 }
278 #endif
279
280 /*
281 * Detect host devices. By convention, /dev/cdrom[N] is always
282 * recognized as a host CDROM.
283 */
284 static BlockDriver *find_hdev_driver(const char *filename)
285 {
286 int score_max = 0, score;
287 BlockDriver *drv = NULL, *d;
288
289 QLIST_FOREACH(d, &bdrv_drivers, list) {
290 if (d->bdrv_probe_device) {
291 score = d->bdrv_probe_device(filename);
292 if (score > score_max) {
293 score_max = score;
294 drv = d;
295 }
296 }
297 }
298
299 return drv;
300 }
301
302 BlockDriver *bdrv_find_protocol(const char *filename)
303 {
304 BlockDriver *drv1;
305 char protocol[128];
306 int len;
307 const char *p;
308
309 /* TODO Drivers without bdrv_file_open must be specified explicitly */
310
311 /*
312 * XXX(hch): we really should not let host device detection
313 * override an explicit protocol specification, but moving this
314 * later breaks access to device names with colons in them.
315 * Thanks to the brain-dead persistent naming schemes on udev-
316 * based Linux systems those actually are quite common.
317 */
318 drv1 = find_hdev_driver(filename);
319 if (drv1) {
320 return drv1;
321 }
322
323 if (!path_has_protocol(filename)) {
324 return bdrv_find_format("file");
325 }
326 p = strchr(filename, ':');
327 assert(p != NULL);
328 len = p - filename;
329 if (len > sizeof(protocol) - 1)
330 len = sizeof(protocol) - 1;
331 memcpy(protocol, filename, len);
332 protocol[len] = '\0';
333 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
334 if (drv1->protocol_name &&
335 !strcmp(drv1->protocol_name, protocol)) {
336 return drv1;
337 }
338 }
339 return NULL;
340 }
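/*
 * Editorial illustration (not part of the original file): protocol lookup
 * first lets host-device probing win, then falls back to the "file"
 * driver for plain paths, and finally matches the text before the first
 * ':' against each registered driver's protocol_name.  Typical results,
 * assuming the usual set of drivers is built in:
 *
 *     bdrv_find_protocol("/dev/cdrom");           // host CD-ROM driver (probed)
 *     bdrv_find_protocol("disk.qcow2");           // "file" driver, no colon
 *     bdrv_find_protocol("nbd:localhost:10809");  // driver whose protocol_name is "nbd"
 */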
341
342 static int find_image_format(const char *filename, BlockDriver **pdrv)
343 {
344 int ret, score, score_max;
345 BlockDriver *drv1, *drv;
346 uint8_t buf[2048];
347 BlockDriverState *bs;
348
349 ret = bdrv_file_open(&bs, filename, 0);
350 if (ret < 0) {
351 *pdrv = NULL;
352 return ret;
353 }
354
355     /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
356 if (bs->sg || !bdrv_is_inserted(bs)) {
357 bdrv_delete(bs);
358 drv = bdrv_find_format("raw");
359 if (!drv) {
360 ret = -ENOENT;
361 }
362 *pdrv = drv;
363 return ret;
364 }
365
366 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
367 bdrv_delete(bs);
368 if (ret < 0) {
369 *pdrv = NULL;
370 return ret;
371 }
372
373 score_max = 0;
374 drv = NULL;
375 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
376 if (drv1->bdrv_probe) {
377 score = drv1->bdrv_probe(buf, ret, filename);
378 if (score > score_max) {
379 score_max = score;
380 drv = drv1;
381 }
382 }
383 }
384 if (!drv) {
385 ret = -ENOENT;
386 }
387 *pdrv = drv;
388 return ret;
389 }
390
391 /**
392 * Set the current 'total_sectors' value
393 */
394 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
395 {
396 BlockDriver *drv = bs->drv;
397
398 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
399 if (bs->sg)
400 return 0;
401
402 /* query actual device if possible, otherwise just trust the hint */
403 if (drv->bdrv_getlength) {
404 int64_t length = drv->bdrv_getlength(bs);
405 if (length < 0) {
406 return length;
407 }
408 hint = length >> BDRV_SECTOR_BITS;
409 }
410
411 bs->total_sectors = hint;
412 return 0;
413 }
414
415 /*
416 * Common part for opening disk images and files
417 */
418 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
419 int flags, BlockDriver *drv)
420 {
421 int ret, open_flags;
422
423 assert(drv != NULL);
424
425 bs->file = NULL;
426 bs->total_sectors = 0;
427 bs->encrypted = 0;
428 bs->valid_key = 0;
429 bs->open_flags = flags;
430 /* buffer_alignment defaulted to 512, drivers can change this value */
431 bs->buffer_alignment = 512;
432
433 pstrcpy(bs->filename, sizeof(bs->filename), filename);
434
435 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
436 return -ENOTSUP;
437 }
438
439 bs->drv = drv;
440 bs->opaque = qemu_mallocz(drv->instance_size);
441
442 /*
443 * Yes, BDRV_O_NOCACHE aka O_DIRECT means we have to present a
444 * write cache to the guest. We do need the fdatasync to flush
445      * out transactions for block allocations, and we may have a
446 * volatile write cache in our backing device to deal with.
447 */
448 if (flags & (BDRV_O_CACHE_WB|BDRV_O_NOCACHE))
449 bs->enable_write_cache = 1;
450
451 /*
452 * Clear flags that are internal to the block layer before opening the
453 * image.
454 */
455 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
456
457 /*
458 * Snapshots should be writeable.
459 */
460 if (bs->is_temporary) {
461 open_flags |= BDRV_O_RDWR;
462 }
463
464 /* Open the image, either directly or using a protocol */
465 if (drv->bdrv_file_open) {
466 ret = drv->bdrv_file_open(bs, filename, open_flags);
467 } else {
468 ret = bdrv_file_open(&bs->file, filename, open_flags);
469 if (ret >= 0) {
470 ret = drv->bdrv_open(bs, open_flags);
471 }
472 }
473
474 if (ret < 0) {
475 goto free_and_fail;
476 }
477
478 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
479
480 ret = refresh_total_sectors(bs, bs->total_sectors);
481 if (ret < 0) {
482 goto free_and_fail;
483 }
484
485 #ifndef _WIN32
486 if (bs->is_temporary) {
487 unlink(filename);
488 }
489 #endif
490 return 0;
491
492 free_and_fail:
493 if (bs->file) {
494 bdrv_delete(bs->file);
495 bs->file = NULL;
496 }
497 qemu_free(bs->opaque);
498 bs->opaque = NULL;
499 bs->drv = NULL;
500 return ret;
501 }
502
503 /*
504 * Opens a file using a protocol (file, host_device, nbd, ...)
505 */
506 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
507 {
508 BlockDriverState *bs;
509 BlockDriver *drv;
510 int ret;
511
512 drv = bdrv_find_protocol(filename);
513 if (!drv) {
514 return -ENOENT;
515 }
516
517 bs = bdrv_new("");
518 ret = bdrv_open_common(bs, filename, flags, drv);
519 if (ret < 0) {
520 bdrv_delete(bs);
521 return ret;
522 }
523 bs->growable = 1;
524 *pbs = bs;
525 return 0;
526 }
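/*
 * Editorial usage sketch (not in the original file): callers use
 * bdrv_file_open() for raw protocol-level access, e.g. to read an image
 * header before the format driver is known, much like find_image_format()
 * above does:
 *
 *     BlockDriverState *file_bs;
 *     uint8_t header[512];
 *     if (bdrv_file_open(&file_bs, "disk.qcow2", 0) == 0) {
 *         bdrv_pread(file_bs, 0, header, sizeof(header));
 *         bdrv_delete(file_bs);
 *     }
 */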
527
528 /*
529 * Opens a disk image (raw, qcow2, vmdk, ...)
530 */
531 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
532 BlockDriver *drv)
533 {
534 int ret;
535
536 if (flags & BDRV_O_SNAPSHOT) {
537 BlockDriverState *bs1;
538 int64_t total_size;
539 int is_protocol = 0;
540 BlockDriver *bdrv_qcow2;
541 QEMUOptionParameter *options;
542 char tmp_filename[PATH_MAX];
543 char backing_filename[PATH_MAX];
544
545 /* if snapshot, we create a temporary backing file and open it
546 instead of opening 'filename' directly */
547
548 /* if there is a backing file, use it */
549 bs1 = bdrv_new("");
550 ret = bdrv_open(bs1, filename, 0, drv);
551 if (ret < 0) {
552 bdrv_delete(bs1);
553 return ret;
554 }
555 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
556
557 if (bs1->drv && bs1->drv->protocol_name)
558 is_protocol = 1;
559
560 bdrv_delete(bs1);
561
562 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
563
564 /* Real path is meaningless for protocols */
565 if (is_protocol)
566 snprintf(backing_filename, sizeof(backing_filename),
567 "%s", filename);
568 else if (!realpath(filename, backing_filename))
569 return -errno;
570
571 bdrv_qcow2 = bdrv_find_format("qcow2");
572 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
573
574 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
575 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
576 if (drv) {
577 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
578 drv->format_name);
579 }
580
581 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
582 free_option_parameters(options);
583 if (ret < 0) {
584 return ret;
585 }
586
587 filename = tmp_filename;
588 drv = bdrv_qcow2;
589 bs->is_temporary = 1;
590 }
591
592 /* Find the right image format driver */
593 if (!drv) {
594 ret = find_image_format(filename, &drv);
595 }
596
597 if (!drv) {
598 goto unlink_and_fail;
599 }
600
601 /* Open the image */
602 ret = bdrv_open_common(bs, filename, flags, drv);
603 if (ret < 0) {
604 goto unlink_and_fail;
605 }
606
607 /* If there is a backing file, use it */
608 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
609 char backing_filename[PATH_MAX];
610 int back_flags;
611 BlockDriver *back_drv = NULL;
612
613 bs->backing_hd = bdrv_new("");
614 path_combine(backing_filename, sizeof(backing_filename),
615 filename, bs->backing_file);
616 if (bs->backing_format[0] != '\0')
617 back_drv = bdrv_find_format(bs->backing_format);
618
619 /* backing files always opened read-only */
620 back_flags =
621 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
622
623 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
624 if (ret < 0) {
625 bdrv_close(bs);
626 return ret;
627 }
628 if (bs->is_temporary) {
629 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
630 } else {
631 /* base image inherits from "parent" */
632 bs->backing_hd->keep_read_only = bs->keep_read_only;
633 }
634 }
635
636 if (!bdrv_key_required(bs)) {
637 /* call the change callback */
638 bs->media_changed = 1;
639 if (bs->change_cb)
640 bs->change_cb(bs->change_opaque);
641 }
642
643 return 0;
644
645 unlink_and_fail:
646 if (bs->is_temporary) {
647 unlink(filename);
648 }
649 return ret;
650 }
651
652 void bdrv_close(BlockDriverState *bs)
653 {
654 if (bs->drv) {
655 if (bs == bs_snapshots) {
656 bs_snapshots = NULL;
657 }
658 if (bs->backing_hd) {
659 bdrv_delete(bs->backing_hd);
660 bs->backing_hd = NULL;
661 }
662 bs->drv->bdrv_close(bs);
663 qemu_free(bs->opaque);
664 #ifdef _WIN32
665 if (bs->is_temporary) {
666 unlink(bs->filename);
667 }
668 #endif
669 bs->opaque = NULL;
670 bs->drv = NULL;
671
672 if (bs->file != NULL) {
673 bdrv_close(bs->file);
674 }
675
676 /* call the change callback */
677 bs->media_changed = 1;
678 if (bs->change_cb)
679 bs->change_cb(bs->change_opaque);
680 }
681 }
682
683 void bdrv_close_all(void)
684 {
685 BlockDriverState *bs;
686
687 QTAILQ_FOREACH(bs, &bdrv_states, list) {
688 bdrv_close(bs);
689 }
690 }
691
692 void bdrv_delete(BlockDriverState *bs)
693 {
694 assert(!bs->peer);
695
696 /* remove from list, if necessary */
697 if (bs->device_name[0] != '\0') {
698 QTAILQ_REMOVE(&bdrv_states, bs, list);
699 }
700
701 bdrv_close(bs);
702 if (bs->file != NULL) {
703 bdrv_delete(bs->file);
704 }
705
706 assert(bs != bs_snapshots);
707 qemu_free(bs);
708 }
709
710 int bdrv_attach(BlockDriverState *bs, DeviceState *qdev)
711 {
712 if (bs->peer) {
713 return -EBUSY;
714 }
715 bs->peer = qdev;
716 return 0;
717 }
718
719 void bdrv_detach(BlockDriverState *bs, DeviceState *qdev)
720 {
721 assert(bs->peer == qdev);
722 bs->peer = NULL;
723 }
724
725 DeviceState *bdrv_get_attached(BlockDriverState *bs)
726 {
727 return bs->peer;
728 }
729
730 /*
731 * Run consistency checks on an image
732 *
733 * Returns 0 if the check could be completed (it doesn't mean that the image is
734  * free of errors) or -errno when an internal error occurred. The results of the
735 * check are stored in res.
736 */
737 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
738 {
739 if (bs->drv->bdrv_check == NULL) {
740 return -ENOTSUP;
741 }
742
743 memset(res, 0, sizeof(*res));
744 return bs->drv->bdrv_check(bs, res);
745 }
746
747 #define COMMIT_BUF_SECTORS 2048
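/* Editorial note: 2048 sectors * BDRV_SECTOR_SIZE (512 bytes) means
   bdrv_commit() copies data in 1 MiB chunks per loop iteration below. */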
748
749 /* commit COW file into the raw image */
750 int bdrv_commit(BlockDriverState *bs)
751 {
752 BlockDriver *drv = bs->drv;
753 BlockDriver *backing_drv;
754 int64_t sector, total_sectors;
755 int n, ro, open_flags;
756 int ret = 0, rw_ret = 0;
757 uint8_t *buf;
758 char filename[1024];
759 BlockDriverState *bs_rw, *bs_ro;
760
761 if (!drv)
762 return -ENOMEDIUM;
763
764 if (!bs->backing_hd) {
765 return -ENOTSUP;
766 }
767
768 if (bs->backing_hd->keep_read_only) {
769 return -EACCES;
770 }
771
772 backing_drv = bs->backing_hd->drv;
773 ro = bs->backing_hd->read_only;
774     pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
775 open_flags = bs->backing_hd->open_flags;
776
777 if (ro) {
778 /* re-open as RW */
779 bdrv_delete(bs->backing_hd);
780 bs->backing_hd = NULL;
781 bs_rw = bdrv_new("");
782 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
783 backing_drv);
784 if (rw_ret < 0) {
785 bdrv_delete(bs_rw);
786 /* try to re-open read-only */
787 bs_ro = bdrv_new("");
788 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
789 backing_drv);
790 if (ret < 0) {
791 bdrv_delete(bs_ro);
792 /* drive not functional anymore */
793 bs->drv = NULL;
794 return ret;
795 }
796 bs->backing_hd = bs_ro;
797 return rw_ret;
798 }
799 bs->backing_hd = bs_rw;
800 }
801
802 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
803 buf = qemu_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
804
805 for (sector = 0; sector < total_sectors; sector += n) {
806 if (drv->bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
807
808 if (bdrv_read(bs, sector, buf, n) != 0) {
809 ret = -EIO;
810 goto ro_cleanup;
811 }
812
813 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
814 ret = -EIO;
815 goto ro_cleanup;
816 }
817 }
818 }
819
820 if (drv->bdrv_make_empty) {
821 ret = drv->bdrv_make_empty(bs);
822 bdrv_flush(bs);
823 }
824
825 /*
826 * Make sure all data we wrote to the backing device is actually
827 * stable on disk.
828 */
829 if (bs->backing_hd)
830 bdrv_flush(bs->backing_hd);
831
832 ro_cleanup:
833 qemu_free(buf);
834
835 if (ro) {
836 /* re-open as RO */
837 bdrv_delete(bs->backing_hd);
838 bs->backing_hd = NULL;
839 bs_ro = bdrv_new("");
840 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
841 backing_drv);
842 if (ret < 0) {
843 bdrv_delete(bs_ro);
844 /* drive not functional anymore */
845 bs->drv = NULL;
846 return ret;
847 }
848 bs->backing_hd = bs_ro;
849 bs->backing_hd->keep_read_only = 0;
850 }
851
852 return ret;
853 }
854
855 void bdrv_commit_all(void)
856 {
857 BlockDriverState *bs;
858
859 QTAILQ_FOREACH(bs, &bdrv_states, list) {
860 bdrv_commit(bs);
861 }
862 }
863
864 /*
865 * Return values:
866 * 0 - success
867 * -EINVAL - backing format specified, but no file
868 * -ENOSPC - can't update the backing file because no space is left in the
869 * image file header
870 * -ENOTSUP - format driver doesn't support changing the backing file
871 */
872 int bdrv_change_backing_file(BlockDriverState *bs,
873 const char *backing_file, const char *backing_fmt)
874 {
875 BlockDriver *drv = bs->drv;
876
877 if (drv->bdrv_change_backing_file != NULL) {
878 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
879 } else {
880 return -ENOTSUP;
881 }
882 }
883
884 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
885 size_t size)
886 {
887 int64_t len;
888
889 if (!bdrv_is_inserted(bs))
890 return -ENOMEDIUM;
891
892 if (bs->growable)
893 return 0;
894
895 len = bdrv_getlength(bs);
896
897 if (offset < 0)
898 return -EIO;
899
900 if ((offset > len) || (len - offset < size))
901 return -EIO;
902
903 return 0;
904 }
905
906 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
907 int nb_sectors)
908 {
909 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
910 nb_sectors * BDRV_SECTOR_SIZE);
911 }
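/*
 * Editorial worked example for the bounds check above: with a 1 GiB,
 * non-growable image (len == 1 << 30), a request of nb_sectors == 8 at
 * sector_num == 2097144 is the last one that still passes, since
 * (2097144 + 8) * BDRV_SECTOR_SIZE == 1 << 30 exactly; one sector further
 * and len - offset < size triggers -EIO.
 */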
912
913 /* return < 0 if error. See bdrv_write() for the return codes */
914 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
915 uint8_t *buf, int nb_sectors)
916 {
917 BlockDriver *drv = bs->drv;
918
919 if (!drv)
920 return -ENOMEDIUM;
921 if (bdrv_check_request(bs, sector_num, nb_sectors))
922 return -EIO;
923
924 return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
925 }
926
927 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
928 int nb_sectors, int dirty)
929 {
930 int64_t start, end;
931 unsigned long val, idx, bit;
932
933 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
934 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
935
936 for (; start <= end; start++) {
937 idx = start / (sizeof(unsigned long) * 8);
938 bit = start % (sizeof(unsigned long) * 8);
939 val = bs->dirty_bitmap[idx];
940 if (dirty) {
941 if (!(val & (1UL << bit))) {
942 bs->dirty_count++;
943 val |= 1UL << bit;
944 }
945 } else {
946 if (val & (1UL << bit)) {
947 bs->dirty_count--;
948 val &= ~(1UL << bit);
949 }
950 }
951 bs->dirty_bitmap[idx] = val;
952 }
953 }
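/*
 * Editorial sketch of the index arithmetic above, assuming 64-bit
 * unsigned long and BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 (1 MiB chunks):
 * sector_num 1048576 falls into chunk 512, i.e. idx 8 / bit 0, and the
 * next chunk (sector 1050624) lands on idx 8 / bit 1.
 */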
954
955 /* Return < 0 if error. Important errors are:
956 -EIO generic I/O error (may happen for all errors)
957 -ENOMEDIUM No media inserted.
958 -EINVAL Invalid sector number or nb_sectors
959    -EACCES      Trying to write to a read-only device
960 */
961 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
962 const uint8_t *buf, int nb_sectors)
963 {
964 BlockDriver *drv = bs->drv;
965     if (!drv)
966 return -ENOMEDIUM;
967 if (bs->read_only)
968 return -EACCES;
969 if (bdrv_check_request(bs, sector_num, nb_sectors))
970 return -EIO;
971
972 if (bs->dirty_bitmap) {
973 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
974 }
975
976 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
977 bs->wr_highest_sector = sector_num + nb_sectors - 1;
978 }
979
980 return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
981 }
982
983 int bdrv_pread(BlockDriverState *bs, int64_t offset,
984 void *buf, int count1)
985 {
986 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
987 int len, nb_sectors, count;
988 int64_t sector_num;
989 int ret;
990
991 count = count1;
992 /* first read to align to sector start */
993 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
994 if (len > count)
995 len = count;
996 sector_num = offset >> BDRV_SECTOR_BITS;
997 if (len > 0) {
998 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
999 return ret;
1000 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1001 count -= len;
1002 if (count == 0)
1003 return count1;
1004 sector_num++;
1005 buf += len;
1006 }
1007
1008 /* read the sectors "in place" */
1009 nb_sectors = count >> BDRV_SECTOR_BITS;
1010 if (nb_sectors > 0) {
1011 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1012 return ret;
1013 sector_num += nb_sectors;
1014 len = nb_sectors << BDRV_SECTOR_BITS;
1015 buf += len;
1016 count -= len;
1017 }
1018
1019 /* add data from the last sector */
1020 if (count > 0) {
1021 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1022 return ret;
1023 memcpy(buf, tmp_buf, count);
1024 }
1025 return count1;
1026 }
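/*
 * Editorial example (not in the original file): a byte-granular read is
 * split into at most three phases by the code above.  bdrv_pread(bs, 300,
 * buf, 1000) performs a head read of sector 0 copying bytes 300..511
 * (212 bytes), one whole-sector read of sector 1 (512 bytes), and a tail
 * read of sector 2 copying the remaining 276 bytes.  bdrv_pwrite() below
 * mirrors this with read-modify-write on the partial sectors.
 */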
1027
1028 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1029 const void *buf, int count1)
1030 {
1031 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1032 int len, nb_sectors, count;
1033 int64_t sector_num;
1034 int ret;
1035
1036 count = count1;
1037 /* first write to align to sector start */
1038 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1039 if (len > count)
1040 len = count;
1041 sector_num = offset >> BDRV_SECTOR_BITS;
1042 if (len > 0) {
1043 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1044 return ret;
1045 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1046 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1047 return ret;
1048 count -= len;
1049 if (count == 0)
1050 return count1;
1051 sector_num++;
1052 buf += len;
1053 }
1054
1055 /* write the sectors "in place" */
1056 nb_sectors = count >> BDRV_SECTOR_BITS;
1057 if (nb_sectors > 0) {
1058 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1059 return ret;
1060 sector_num += nb_sectors;
1061 len = nb_sectors << BDRV_SECTOR_BITS;
1062 buf += len;
1063 count -= len;
1064 }
1065
1066 /* add data from the last sector */
1067 if (count > 0) {
1068 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1069 return ret;
1070 memcpy(tmp_buf, buf, count);
1071 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1072 return ret;
1073 }
1074 return count1;
1075 }
1076
1077 /*
1078 * Writes to the file and ensures that no writes are reordered across this
1079 * request (acts as a barrier)
1080 *
1081 * Returns 0 on success, -errno in error cases.
1082 */
1083 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1084 const void *buf, int count)
1085 {
1086 int ret;
1087
1088 ret = bdrv_pwrite(bs, offset, buf, count);
1089 if (ret < 0) {
1090 return ret;
1091 }
1092
1093 /* No flush needed for cache=writethrough, it uses O_DSYNC */
1094 if ((bs->open_flags & BDRV_O_CACHE_MASK) != 0) {
1095 bdrv_flush(bs);
1096 }
1097
1098 return 0;
1099 }
1100
1101 /*
1102 * Writes to the file and ensures that no writes are reordered across this
1103 * request (acts as a barrier)
1104 *
1105 * Returns 0 on success, -errno in error cases.
1106 */
1107 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,
1108 const uint8_t *buf, int nb_sectors)
1109 {
1110 return bdrv_pwrite_sync(bs, BDRV_SECTOR_SIZE * sector_num,
1111 buf, BDRV_SECTOR_SIZE * nb_sectors);
1112 }
1113
1114 /**
1115 * Truncate file to 'offset' bytes (needed only for file protocols)
1116 */
1117 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1118 {
1119 BlockDriver *drv = bs->drv;
1120 int ret;
1121 if (!drv)
1122 return -ENOMEDIUM;
1123 if (!drv->bdrv_truncate)
1124 return -ENOTSUP;
1125 if (bs->read_only)
1126 return -EACCES;
1127 ret = drv->bdrv_truncate(bs, offset);
1128 if (ret == 0) {
1129 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1130 }
1131 return ret;
1132 }
1133
1134 /**
1135 * Length of a file in bytes. Return < 0 if error or unknown.
1136 */
1137 int64_t bdrv_getlength(BlockDriverState *bs)
1138 {
1139 BlockDriver *drv = bs->drv;
1140 if (!drv)
1141 return -ENOMEDIUM;
1142
1143 /* Fixed size devices use the total_sectors value for speed instead of
1144 issuing a length query (like lseek) on each call. Also, legacy block
1145 drivers don't provide a bdrv_getlength function and must use
1146 total_sectors. */
1147 if (!bs->growable || !drv->bdrv_getlength) {
1148 return bs->total_sectors * BDRV_SECTOR_SIZE;
1149 }
1150 return drv->bdrv_getlength(bs);
1151 }
1152
1153 /* return 0 as number of sectors if no device present or error */
1154 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1155 {
1156 int64_t length;
1157 length = bdrv_getlength(bs);
1158 if (length < 0)
1159 length = 0;
1160 else
1161 length = length >> BDRV_SECTOR_BITS;
1162 *nb_sectors_ptr = length;
1163 }
1164
1165 struct partition {
1166 uint8_t boot_ind; /* 0x80 - active */
1167 uint8_t head; /* starting head */
1168 uint8_t sector; /* starting sector */
1169 uint8_t cyl; /* starting cylinder */
1170 uint8_t sys_ind; /* What partition type */
1171 uint8_t end_head; /* end head */
1172 uint8_t end_sector; /* end sector */
1173 uint8_t end_cyl; /* end cylinder */
1174 uint32_t start_sect; /* starting sector counting from 0 */
1175 uint32_t nr_sects; /* nr of sectors in partition */
1176 } __attribute__((packed));
1177
1178 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if it could not be guessed */
1179 static int guess_disk_lchs(BlockDriverState *bs,
1180 int *pcylinders, int *pheads, int *psectors)
1181 {
1182 uint8_t buf[BDRV_SECTOR_SIZE];
1183 int ret, i, heads, sectors, cylinders;
1184 struct partition *p;
1185 uint32_t nr_sects;
1186 uint64_t nb_sectors;
1187
1188 bdrv_get_geometry(bs, &nb_sectors);
1189
1190 ret = bdrv_read(bs, 0, buf, 1);
1191 if (ret < 0)
1192 return -1;
1193 /* test msdos magic */
1194 if (buf[510] != 0x55 || buf[511] != 0xaa)
1195 return -1;
1196 for(i = 0; i < 4; i++) {
1197 p = ((struct partition *)(buf + 0x1be)) + i;
1198 nr_sects = le32_to_cpu(p->nr_sects);
1199 if (nr_sects && p->end_head) {
1200 /* We make the assumption that the partition terminates on
1201 a cylinder boundary */
1202 heads = p->end_head + 1;
1203 sectors = p->end_sector & 63;
1204 if (sectors == 0)
1205 continue;
1206 cylinders = nb_sectors / (heads * sectors);
1207 if (cylinders < 1 || cylinders > 16383)
1208 continue;
1209 *pheads = heads;
1210 *psectors = sectors;
1211 *pcylinders = cylinders;
1212 #if 0
1213 printf("guessed geometry: LCHS=%d %d %d\n",
1214 cylinders, heads, sectors);
1215 #endif
1216 return 0;
1217 }
1218 }
1219 return -1;
1220 }
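/*
 * Editorial worked example: if a valid MSDOS partition entry reports
 * end_head == 15 and end_sector == 63 on a 4194304-sector (2 GiB) disk,
 * the code above derives heads = 16, sectors = 63 and
 * cylinders = 4194304 / (16 * 63) = 4161, which passes the 1..16383
 * sanity check and becomes the guessed logical geometry.
 */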
1221
1222 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1223 {
1224 int translation, lba_detected = 0;
1225 int cylinders, heads, secs;
1226 uint64_t nb_sectors;
1227
1228 /* if a geometry hint is available, use it */
1229 bdrv_get_geometry(bs, &nb_sectors);
1230 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1231 translation = bdrv_get_translation_hint(bs);
1232 if (cylinders != 0) {
1233 *pcyls = cylinders;
1234 *pheads = heads;
1235 *psecs = secs;
1236 } else {
1237 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1238 if (heads > 16) {
1239 /* if heads > 16, it means that a BIOS LBA
1240 translation was active, so the default
1241 hardware geometry is OK */
1242 lba_detected = 1;
1243 goto default_geometry;
1244 } else {
1245 *pcyls = cylinders;
1246 *pheads = heads;
1247 *psecs = secs;
1248 /* disable any translation to be in sync with
1249 the logical geometry */
1250 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1251 bdrv_set_translation_hint(bs,
1252 BIOS_ATA_TRANSLATION_NONE);
1253 }
1254 }
1255 } else {
1256 default_geometry:
1257 /* if no geometry, use a standard physical disk geometry */
1258 cylinders = nb_sectors / (16 * 63);
1259
1260 if (cylinders > 16383)
1261 cylinders = 16383;
1262 else if (cylinders < 2)
1263 cylinders = 2;
1264 *pcyls = cylinders;
1265 *pheads = 16;
1266 *psecs = 63;
1267 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1268 if ((*pcyls * *pheads) <= 131072) {
1269 bdrv_set_translation_hint(bs,
1270 BIOS_ATA_TRANSLATION_LARGE);
1271 } else {
1272 bdrv_set_translation_hint(bs,
1273 BIOS_ATA_TRANSLATION_LBA);
1274 }
1275 }
1276 }
1277 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1278 }
1279 }
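/* Editorial note: with the fixed 16-head / 63-sector default above, the
   16383-cylinder cap corresponds to 16383 * 16 * 63 * 512 bytes, roughly
   8.4 GB -- the classic CHS addressing limit that the LARGE/LBA
   translation hints work around. */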
1280
1281 void bdrv_set_geometry_hint(BlockDriverState *bs,
1282 int cyls, int heads, int secs)
1283 {
1284 bs->cyls = cyls;
1285 bs->heads = heads;
1286 bs->secs = secs;
1287 }
1288
1289 void bdrv_set_type_hint(BlockDriverState *bs, int type)
1290 {
1291 bs->type = type;
1292 bs->removable = ((type == BDRV_TYPE_CDROM ||
1293 type == BDRV_TYPE_FLOPPY));
1294 }
1295
1296 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1297 {
1298 bs->translation = translation;
1299 }
1300
1301 void bdrv_get_geometry_hint(BlockDriverState *bs,
1302 int *pcyls, int *pheads, int *psecs)
1303 {
1304 *pcyls = bs->cyls;
1305 *pheads = bs->heads;
1306 *psecs = bs->secs;
1307 }
1308
1309 int bdrv_get_type_hint(BlockDriverState *bs)
1310 {
1311 return bs->type;
1312 }
1313
1314 int bdrv_get_translation_hint(BlockDriverState *bs)
1315 {
1316 return bs->translation;
1317 }
1318
1319 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
1320 BlockErrorAction on_write_error)
1321 {
1322 bs->on_read_error = on_read_error;
1323 bs->on_write_error = on_write_error;
1324 }
1325
1326 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
1327 {
1328 return is_read ? bs->on_read_error : bs->on_write_error;
1329 }
1330
1331 void bdrv_set_removable(BlockDriverState *bs, int removable)
1332 {
1333 bs->removable = removable;
1334 if (removable && bs == bs_snapshots) {
1335 bs_snapshots = NULL;
1336 }
1337 }
1338
1339 int bdrv_is_removable(BlockDriverState *bs)
1340 {
1341 return bs->removable;
1342 }
1343
1344 int bdrv_is_read_only(BlockDriverState *bs)
1345 {
1346 return bs->read_only;
1347 }
1348
1349 int bdrv_is_sg(BlockDriverState *bs)
1350 {
1351 return bs->sg;
1352 }
1353
1354 int bdrv_enable_write_cache(BlockDriverState *bs)
1355 {
1356 return bs->enable_write_cache;
1357 }
1358
1359 /* XXX: no longer used */
1360 void bdrv_set_change_cb(BlockDriverState *bs,
1361 void (*change_cb)(void *opaque), void *opaque)
1362 {
1363 bs->change_cb = change_cb;
1364 bs->change_opaque = opaque;
1365 }
1366
1367 int bdrv_is_encrypted(BlockDriverState *bs)
1368 {
1369 if (bs->backing_hd && bs->backing_hd->encrypted)
1370 return 1;
1371 return bs->encrypted;
1372 }
1373
1374 int bdrv_key_required(BlockDriverState *bs)
1375 {
1376 BlockDriverState *backing_hd = bs->backing_hd;
1377
1378 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
1379 return 1;
1380 return (bs->encrypted && !bs->valid_key);
1381 }
1382
1383 int bdrv_set_key(BlockDriverState *bs, const char *key)
1384 {
1385 int ret;
1386 if (bs->backing_hd && bs->backing_hd->encrypted) {
1387 ret = bdrv_set_key(bs->backing_hd, key);
1388 if (ret < 0)
1389 return ret;
1390 if (!bs->encrypted)
1391 return 0;
1392 }
1393 if (!bs->encrypted) {
1394 return -EINVAL;
1395 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
1396 return -ENOMEDIUM;
1397 }
1398 ret = bs->drv->bdrv_set_key(bs, key);
1399 if (ret < 0) {
1400 bs->valid_key = 0;
1401 } else if (!bs->valid_key) {
1402 bs->valid_key = 1;
1403 /* call the change callback now, we skipped it on open */
1404 bs->media_changed = 1;
1405 if (bs->change_cb)
1406 bs->change_cb(bs->change_opaque);
1407 }
1408 return ret;
1409 }
1410
1411 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
1412 {
1413 if (!bs->drv) {
1414 buf[0] = '\0';
1415 } else {
1416 pstrcpy(buf, buf_size, bs->drv->format_name);
1417 }
1418 }
1419
1420 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
1421 void *opaque)
1422 {
1423 BlockDriver *drv;
1424
1425 QLIST_FOREACH(drv, &bdrv_drivers, list) {
1426 it(opaque, drv->format_name);
1427 }
1428 }
1429
1430 BlockDriverState *bdrv_find(const char *name)
1431 {
1432 BlockDriverState *bs;
1433
1434 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1435 if (!strcmp(name, bs->device_name)) {
1436 return bs;
1437 }
1438 }
1439 return NULL;
1440 }
1441
1442 BlockDriverState *bdrv_next(BlockDriverState *bs)
1443 {
1444 if (!bs) {
1445 return QTAILQ_FIRST(&bdrv_states);
1446 }
1447 return QTAILQ_NEXT(bs, list);
1448 }
1449
1450 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
1451 {
1452 BlockDriverState *bs;
1453
1454 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1455 it(opaque, bs);
1456 }
1457 }
1458
1459 const char *bdrv_get_device_name(BlockDriverState *bs)
1460 {
1461 return bs->device_name;
1462 }
1463
1464 int bdrv_flush(BlockDriverState *bs)
1465 {
1466 if (bs->open_flags & BDRV_O_NO_FLUSH) {
1467 return 0;
1468 }
1469
1470 if (bs->drv && bs->drv->bdrv_flush) {
1471 return bs->drv->bdrv_flush(bs);
1472 }
1473
1474 /*
1475 * Some block drivers always operate in either writethrough or unsafe mode
1476      * and therefore don't support bdrv_flush. Usually qemu doesn't know how
1477 * the server works (because the behaviour is hardcoded or depends on
1478 * server-side configuration), so we can't ensure that everything is safe
1479 * on disk. Returning an error doesn't work because that would break guests
1480 * even if the server operates in writethrough mode.
1481 *
1482 * Let's hope the user knows what he's doing.
1483 */
1484 return 0;
1485 }
1486
1487 void bdrv_flush_all(void)
1488 {
1489 BlockDriverState *bs;
1490
1491 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1492 if (bs->drv && !bdrv_is_read_only(bs) &&
1493 (!bdrv_is_removable(bs) || bdrv_is_inserted(bs))) {
1494 bdrv_flush(bs);
1495 }
1496 }
1497 }
1498
1499 int bdrv_has_zero_init(BlockDriverState *bs)
1500 {
1501 assert(bs->drv);
1502
1503 if (bs->drv->bdrv_has_zero_init) {
1504 return bs->drv->bdrv_has_zero_init(bs);
1505 }
1506
1507 return 1;
1508 }
1509
1510 /*
1511 * Returns true iff the specified sector is present in the disk image. Drivers
1512 * not implementing the functionality are assumed to not support backing files,
1513 * hence all their sectors are reported as allocated.
1514 *
1515 * 'pnum' is set to the number of sectors (including and immediately following
1516 * the specified sector) that are known to be in the same
1517 * allocated/unallocated state.
1518 *
1519 * 'nb_sectors' is the max value 'pnum' should be set to.
1520 */
1521 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
1522 int *pnum)
1523 {
1524 int64_t n;
1525 if (!bs->drv->bdrv_is_allocated) {
1526 if (sector_num >= bs->total_sectors) {
1527 *pnum = 0;
1528 return 0;
1529 }
1530 n = bs->total_sectors - sector_num;
1531 *pnum = (n < nb_sectors) ? (n) : (nb_sectors);
1532 return 1;
1533 }
1534 return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
1535 }
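/*
 * Editorial example: for a driver without bdrv_is_allocated and an image
 * of 100 total_sectors, bdrv_is_allocated(bs, 90, 20, &pnum) returns 1
 * (allocated) but clamps *pnum to 10, the number of sectors actually left
 * before the end of the image.
 */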
1536
1537 void bdrv_mon_event(const BlockDriverState *bdrv,
1538 BlockMonEventAction action, int is_read)
1539 {
1540 QObject *data;
1541 const char *action_str;
1542
1543 switch (action) {
1544 case BDRV_ACTION_REPORT:
1545 action_str = "report";
1546 break;
1547 case BDRV_ACTION_IGNORE:
1548 action_str = "ignore";
1549 break;
1550 case BDRV_ACTION_STOP:
1551 action_str = "stop";
1552 break;
1553 default:
1554 abort();
1555 }
1556
1557 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1558 bdrv->device_name,
1559 action_str,
1560 is_read ? "read" : "write");
1561 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
1562
1563 qobject_decref(data);
1564 }
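/*
 * Editorial illustration of the QMP payload built above, for a
 * hypothetical device name "ide0-hd0" and a stopped write:
 *
 *     { "device": "ide0-hd0", "action": "stop", "operation": "write" }
 *
 * delivered to monitors as a QEVENT_BLOCK_IO_ERROR event.
 */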
1565
1566 static void bdrv_print_dict(QObject *obj, void *opaque)
1567 {
1568 QDict *bs_dict;
1569 Monitor *mon = opaque;
1570
1571 bs_dict = qobject_to_qdict(obj);
1572
1573 monitor_printf(mon, "%s: type=%s removable=%d",
1574 qdict_get_str(bs_dict, "device"),
1575 qdict_get_str(bs_dict, "type"),
1576 qdict_get_bool(bs_dict, "removable"));
1577
1578 if (qdict_get_bool(bs_dict, "removable")) {
1579 monitor_printf(mon, " locked=%d", qdict_get_bool(bs_dict, "locked"));
1580 }
1581
1582 if (qdict_haskey(bs_dict, "inserted")) {
1583 QDict *qdict = qobject_to_qdict(qdict_get(bs_dict, "inserted"));
1584
1585 monitor_printf(mon, " file=");
1586 monitor_print_filename(mon, qdict_get_str(qdict, "file"));
1587 if (qdict_haskey(qdict, "backing_file")) {
1588 monitor_printf(mon, " backing_file=");
1589 monitor_print_filename(mon, qdict_get_str(qdict, "backing_file"));
1590 }
1591 monitor_printf(mon, " ro=%d drv=%s encrypted=%d",
1592 qdict_get_bool(qdict, "ro"),
1593 qdict_get_str(qdict, "drv"),
1594 qdict_get_bool(qdict, "encrypted"));
1595 } else {
1596 monitor_printf(mon, " [not inserted]");
1597 }
1598
1599 monitor_printf(mon, "\n");
1600 }
1601
1602 void bdrv_info_print(Monitor *mon, const QObject *data)
1603 {
1604 qlist_iter(qobject_to_qlist(data), bdrv_print_dict, mon);
1605 }
1606
1607 void bdrv_info(Monitor *mon, QObject **ret_data)
1608 {
1609 QList *bs_list;
1610 BlockDriverState *bs;
1611
1612 bs_list = qlist_new();
1613
1614 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1615 QObject *bs_obj;
1616 const char *type = "unknown";
1617
1618 switch(bs->type) {
1619 case BDRV_TYPE_HD:
1620 type = "hd";
1621 break;
1622 case BDRV_TYPE_CDROM:
1623 type = "cdrom";
1624 break;
1625 case BDRV_TYPE_FLOPPY:
1626 type = "floppy";
1627 break;
1628 }
1629
1630 bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': %s, "
1631 "'removable': %i, 'locked': %i }",
1632 bs->device_name, type, bs->removable,
1633 bs->locked);
1634
1635 if (bs->drv) {
1636 QObject *obj;
1637 QDict *bs_dict = qobject_to_qdict(bs_obj);
1638
1639 obj = qobject_from_jsonf("{ 'file': %s, 'ro': %i, 'drv': %s, "
1640 "'encrypted': %i }",
1641 bs->filename, bs->read_only,
1642 bs->drv->format_name,
1643 bdrv_is_encrypted(bs));
1644 if (bs->backing_file[0] != '\0') {
1645 QDict *qdict = qobject_to_qdict(obj);
1646 qdict_put(qdict, "backing_file",
1647 qstring_from_str(bs->backing_file));
1648 }
1649
1650 qdict_put_obj(bs_dict, "inserted", obj);
1651 }
1652 qlist_append_obj(bs_list, bs_obj);
1653 }
1654
1655 *ret_data = QOBJECT(bs_list);
1656 }
1657
1658 static void bdrv_stats_iter(QObject *data, void *opaque)
1659 {
1660 QDict *qdict;
1661 Monitor *mon = opaque;
1662
1663 qdict = qobject_to_qdict(data);
1664 monitor_printf(mon, "%s:", qdict_get_str(qdict, "device"));
1665
1666 qdict = qobject_to_qdict(qdict_get(qdict, "stats"));
1667 monitor_printf(mon, " rd_bytes=%" PRId64
1668 " wr_bytes=%" PRId64
1669 " rd_operations=%" PRId64
1670 " wr_operations=%" PRId64
1671 "\n",
1672 qdict_get_int(qdict, "rd_bytes"),
1673 qdict_get_int(qdict, "wr_bytes"),
1674 qdict_get_int(qdict, "rd_operations"),
1675 qdict_get_int(qdict, "wr_operations"));
1676 }
1677
1678 void bdrv_stats_print(Monitor *mon, const QObject *data)
1679 {
1680 qlist_iter(qobject_to_qlist(data), bdrv_stats_iter, mon);
1681 }
1682
1683 static QObject* bdrv_info_stats_bs(BlockDriverState *bs)
1684 {
1685 QObject *res;
1686 QDict *dict;
1687
1688 res = qobject_from_jsonf("{ 'stats': {"
1689 "'rd_bytes': %" PRId64 ","
1690 "'wr_bytes': %" PRId64 ","
1691 "'rd_operations': %" PRId64 ","
1692 "'wr_operations': %" PRId64 ","
1693 "'wr_highest_offset': %" PRId64
1694 "} }",
1695 bs->rd_bytes, bs->wr_bytes,
1696 bs->rd_ops, bs->wr_ops,
1697 bs->wr_highest_sector *
1698 (uint64_t)BDRV_SECTOR_SIZE);
1699 dict = qobject_to_qdict(res);
1700
1701 if (*bs->device_name) {
1702 qdict_put(dict, "device", qstring_from_str(bs->device_name));
1703 }
1704
1705 if (bs->file) {
1706 QObject *parent = bdrv_info_stats_bs(bs->file);
1707 qdict_put_obj(dict, "parent", parent);
1708 }
1709
1710 return res;
1711 }
1712
1713 void bdrv_info_stats(Monitor *mon, QObject **ret_data)
1714 {
1715 QObject *obj;
1716 QList *devices;
1717 BlockDriverState *bs;
1718
1719 devices = qlist_new();
1720
1721 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1722 obj = bdrv_info_stats_bs(bs);
1723 qlist_append_obj(devices, obj);
1724 }
1725
1726 *ret_data = QOBJECT(devices);
1727 }
1728
1729 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
1730 {
1731 if (bs->backing_hd && bs->backing_hd->encrypted)
1732 return bs->backing_file;
1733 else if (bs->encrypted)
1734 return bs->filename;
1735 else
1736 return NULL;
1737 }
1738
1739 void bdrv_get_backing_filename(BlockDriverState *bs,
1740 char *filename, int filename_size)
1741 {
1742     if (bs->backing_file[0] == '\0') {
1743 pstrcpy(filename, filename_size, "");
1744 } else {
1745 pstrcpy(filename, filename_size, bs->backing_file);
1746 }
1747 }
1748
1749 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1750 const uint8_t *buf, int nb_sectors)
1751 {
1752 BlockDriver *drv = bs->drv;
1753 if (!drv)
1754 return -ENOMEDIUM;
1755 if (!drv->bdrv_write_compressed)
1756 return -ENOTSUP;
1757 if (bdrv_check_request(bs, sector_num, nb_sectors))
1758 return -EIO;
1759
1760 if (bs->dirty_bitmap) {
1761 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1762 }
1763
1764 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1765 }
1766
1767 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1768 {
1769 BlockDriver *drv = bs->drv;
1770 if (!drv)
1771 return -ENOMEDIUM;
1772 if (!drv->bdrv_get_info)
1773 return -ENOTSUP;
1774 memset(bdi, 0, sizeof(*bdi));
1775 return drv->bdrv_get_info(bs, bdi);
1776 }
1777
1778 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1779 int64_t pos, int size)
1780 {
1781 BlockDriver *drv = bs->drv;
1782 if (!drv)
1783 return -ENOMEDIUM;
1784 if (drv->bdrv_save_vmstate)
1785 return drv->bdrv_save_vmstate(bs, buf, pos, size);
1786 if (bs->file)
1787 return bdrv_save_vmstate(bs->file, buf, pos, size);
1788 return -ENOTSUP;
1789 }
1790
1791 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1792 int64_t pos, int size)
1793 {
1794 BlockDriver *drv = bs->drv;
1795 if (!drv)
1796 return -ENOMEDIUM;
1797 if (drv->bdrv_load_vmstate)
1798 return drv->bdrv_load_vmstate(bs, buf, pos, size);
1799 if (bs->file)
1800 return bdrv_load_vmstate(bs->file, buf, pos, size);
1801 return -ENOTSUP;
1802 }
1803
1804 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
1805 {
1806 BlockDriver *drv = bs->drv;
1807
1808 if (!drv || !drv->bdrv_debug_event) {
1809 return;
1810 }
1811
1812 return drv->bdrv_debug_event(bs, event);
1813
1814 }
1815
1816 /**************************************************************/
1817 /* handling of snapshots */
1818
1819 int bdrv_can_snapshot(BlockDriverState *bs)
1820 {
1821 BlockDriver *drv = bs->drv;
1822 if (!drv || bdrv_is_removable(bs) || bdrv_is_read_only(bs)) {
1823 return 0;
1824 }
1825
1826 if (!drv->bdrv_snapshot_create) {
1827 if (bs->file != NULL) {
1828 return bdrv_can_snapshot(bs->file);
1829 }
1830 return 0;
1831 }
1832
1833 return 1;
1834 }
1835
1836 int bdrv_is_snapshot(BlockDriverState *bs)
1837 {
1838 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
1839 }
1840
1841 BlockDriverState *bdrv_snapshots(void)
1842 {
1843 BlockDriverState *bs;
1844
1845 if (bs_snapshots) {
1846 return bs_snapshots;
1847 }
1848
1849 bs = NULL;
1850 while ((bs = bdrv_next(bs))) {
1851 if (bdrv_can_snapshot(bs)) {
1852 bs_snapshots = bs;
1853 return bs;
1854 }
1855 }
1856 return NULL;
1857 }
1858
1859 int bdrv_snapshot_create(BlockDriverState *bs,
1860 QEMUSnapshotInfo *sn_info)
1861 {
1862 BlockDriver *drv = bs->drv;
1863 if (!drv)
1864 return -ENOMEDIUM;
1865 if (drv->bdrv_snapshot_create)
1866 return drv->bdrv_snapshot_create(bs, sn_info);
1867 if (bs->file)
1868 return bdrv_snapshot_create(bs->file, sn_info);
1869 return -ENOTSUP;
1870 }
1871
1872 int bdrv_snapshot_goto(BlockDriverState *bs,
1873 const char *snapshot_id)
1874 {
1875 BlockDriver *drv = bs->drv;
1876 int ret, open_ret;
1877
1878 if (!drv)
1879 return -ENOMEDIUM;
1880 if (drv->bdrv_snapshot_goto)
1881 return drv->bdrv_snapshot_goto(bs, snapshot_id);
1882
1883 if (bs->file) {
1884 drv->bdrv_close(bs);
1885 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
1886 open_ret = drv->bdrv_open(bs, bs->open_flags);
1887 if (open_ret < 0) {
1888 bdrv_delete(bs->file);
1889 bs->drv = NULL;
1890 return open_ret;
1891 }
1892 return ret;
1893 }
1894
1895 return -ENOTSUP;
1896 }
1897
1898 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
1899 {
1900 BlockDriver *drv = bs->drv;
1901 if (!drv)
1902 return -ENOMEDIUM;
1903 if (drv->bdrv_snapshot_delete)
1904 return drv->bdrv_snapshot_delete(bs, snapshot_id);
1905 if (bs->file)
1906 return bdrv_snapshot_delete(bs->file, snapshot_id);
1907 return -ENOTSUP;
1908 }
1909
1910 int bdrv_snapshot_list(BlockDriverState *bs,
1911 QEMUSnapshotInfo **psn_info)
1912 {
1913 BlockDriver *drv = bs->drv;
1914 if (!drv)
1915 return -ENOMEDIUM;
1916 if (drv->bdrv_snapshot_list)
1917 return drv->bdrv_snapshot_list(bs, psn_info);
1918 if (bs->file)
1919 return bdrv_snapshot_list(bs->file, psn_info);
1920 return -ENOTSUP;
1921 }
1922
1923 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
1924 const char *snapshot_name)
1925 {
1926 BlockDriver *drv = bs->drv;
1927 if (!drv) {
1928 return -ENOMEDIUM;
1929 }
1930 if (!bs->read_only) {
1931 return -EINVAL;
1932 }
1933 if (drv->bdrv_snapshot_load_tmp) {
1934 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
1935 }
1936 return -ENOTSUP;
1937 }
1938
1939 #define NB_SUFFIXES 4
1940
1941 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
1942 {
1943 static const char suffixes[NB_SUFFIXES] = "KMGT";
1944 int64_t base;
1945 int i;
1946
1947 if (size <= 999) {
1948 snprintf(buf, buf_size, "%" PRId64, size);
1949 } else {
1950 base = 1024;
1951 for(i = 0; i < NB_SUFFIXES; i++) {
1952 if (size < (10 * base)) {
1953 snprintf(buf, buf_size, "%0.1f%c",
1954 (double)size / base,
1955 suffixes[i]);
1956 break;
1957 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
1958 snprintf(buf, buf_size, "%" PRId64 "%c",
1959 ((size + (base >> 1)) / base),
1960 suffixes[i]);
1961 break;
1962 }
1963 base = base * 1024;
1964 }
1965 }
1966 return buf;
1967 }
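/*
 * Editorial examples of the resulting buf contents, given the "KMGT"
 * suffix table above:
 *
 *     get_human_readable_size(buf, sizeof(buf), 999);         // "999"
 *     get_human_readable_size(buf, sizeof(buf), 8192);        // "8.0K"
 *     get_human_readable_size(buf, sizeof(buf), 1073741824);  // "1.0G"
 */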
1968
1969 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
1970 {
1971 char buf1[128], date_buf[128], clock_buf[128];
1972 #ifdef _WIN32
1973 struct tm *ptm;
1974 #else
1975 struct tm tm;
1976 #endif
1977 time_t ti;
1978 int64_t secs;
1979
1980 if (!sn) {
1981 snprintf(buf, buf_size,
1982 "%-10s%-20s%7s%20s%15s",
1983 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
1984 } else {
1985 ti = sn->date_sec;
1986 #ifdef _WIN32
1987 ptm = localtime(&ti);
1988 strftime(date_buf, sizeof(date_buf),
1989 "%Y-%m-%d %H:%M:%S", ptm);
1990 #else
1991 localtime_r(&ti, &tm);
1992 strftime(date_buf, sizeof(date_buf),
1993 "%Y-%m-%d %H:%M:%S", &tm);
1994 #endif
1995 secs = sn->vm_clock_nsec / 1000000000;
1996 snprintf(clock_buf, sizeof(clock_buf),
1997 "%02d:%02d:%02d.%03d",
1998 (int)(secs / 3600),
1999 (int)((secs / 60) % 60),
2000 (int)(secs % 60),
2001 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2002 snprintf(buf, buf_size,
2003 "%-10s%-20s%7s%20s%15s",
2004 sn->id_str, sn->name,
2005 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2006 date_buf,
2007 clock_buf);
2008 }
2009 return buf;
2010 }
2011
2012
2013 /**************************************************************/
2014 /* async I/Os */
2015
2016 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2017 QEMUIOVector *qiov, int nb_sectors,
2018 BlockDriverCompletionFunc *cb, void *opaque)
2019 {
2020 BlockDriver *drv = bs->drv;
2021 BlockDriverAIOCB *ret;
2022
2023 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2024
2025 if (!drv)
2026 return NULL;
2027 if (bdrv_check_request(bs, sector_num, nb_sectors))
2028 return NULL;
2029
2030 ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
2031 cb, opaque);
2032
2033 if (ret) {
2034 /* Update stats even though technically transfer has not happened. */
2035 bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2036 bs->rd_ops ++;
2037 }
2038
2039 return ret;
2040 }
2041
2042 typedef struct BlockCompleteData {
2043 BlockDriverCompletionFunc *cb;
2044 void *opaque;
2045 BlockDriverState *bs;
2046 int64_t sector_num;
2047 int nb_sectors;
2048 } BlockCompleteData;
2049
2050 static void block_complete_cb(void *opaque, int ret)
2051 {
2052 BlockCompleteData *b = opaque;
2053
2054 if (b->bs->dirty_bitmap) {
2055 set_dirty_bitmap(b->bs, b->sector_num, b->nb_sectors, 1);
2056 }
2057 b->cb(b->opaque, ret);
2058 qemu_free(b);
2059 }
2060
2061 static BlockCompleteData *blk_dirty_cb_alloc(BlockDriverState *bs,
2062 int64_t sector_num,
2063 int nb_sectors,
2064 BlockDriverCompletionFunc *cb,
2065 void *opaque)
2066 {
2067 BlockCompleteData *blkdata = qemu_mallocz(sizeof(BlockCompleteData));
2068
2069 blkdata->bs = bs;
2070 blkdata->cb = cb;
2071 blkdata->opaque = opaque;
2072 blkdata->sector_num = sector_num;
2073 blkdata->nb_sectors = nb_sectors;
2074
2075 return blkdata;
2076 }
2077
2078 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2079 QEMUIOVector *qiov, int nb_sectors,
2080 BlockDriverCompletionFunc *cb, void *opaque)
2081 {
2082 BlockDriver *drv = bs->drv;
2083 BlockDriverAIOCB *ret;
2084 BlockCompleteData *blk_cb_data;
2085
2086 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2087
2088 if (!drv)
2089 return NULL;
2090 if (bs->read_only)
2091 return NULL;
2092 if (bdrv_check_request(bs, sector_num, nb_sectors))
2093 return NULL;
2094
2095 if (bs->dirty_bitmap) {
2096 blk_cb_data = blk_dirty_cb_alloc(bs, sector_num, nb_sectors, cb,
2097 opaque);
2098 cb = &block_complete_cb;
2099 opaque = blk_cb_data;
2100 }
2101
2102 ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
2103 cb, opaque);
2104
2105 if (ret) {
2106 /* Update stats even though technically transfer has not happened. */
2107 bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2108 bs->wr_ops ++;
2109 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2110 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2111 }
2112 }
2113
2114 return ret;
2115 }
2116
2117
2118 typedef struct MultiwriteCB {
2119 int error;
2120 int num_requests;
2121 int num_callbacks;
2122 struct {
2123 BlockDriverCompletionFunc *cb;
2124 void *opaque;
2125 QEMUIOVector *free_qiov;
2126 void *free_buf;
2127 } callbacks[];
2128 } MultiwriteCB;
2129
2130 static void multiwrite_user_cb(MultiwriteCB *mcb)
2131 {
2132 int i;
2133
2134 for (i = 0; i < mcb->num_callbacks; i++) {
2135 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2136 if (mcb->callbacks[i].free_qiov) {
2137 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2138 }
2139 qemu_free(mcb->callbacks[i].free_qiov);
2140 qemu_vfree(mcb->callbacks[i].free_buf);
2141 }
2142 }
2143
2144 static void multiwrite_cb(void *opaque, int ret)
2145 {
2146 MultiwriteCB *mcb = opaque;
2147
2148 trace_multiwrite_cb(mcb, ret);
2149
2150 if (ret < 0 && !mcb->error) {
2151 mcb->error = ret;
2152 }
2153
2154 mcb->num_requests--;
2155 if (mcb->num_requests == 0) {
2156 multiwrite_user_cb(mcb);
2157 qemu_free(mcb);
2158 }
2159 }
2160
2161 static int multiwrite_req_compare(const void *a, const void *b)
2162 {
2163 const BlockRequest *req1 = a, *req2 = b;
2164
2165 /*
2166 * Note that we can't simply subtract req2->sector from req1->sector
2167 * here as that could overflow the return value.
2168 */
2169 if (req1->sector > req2->sector) {
2170 return 1;
2171 } else if (req1->sector < req2->sector) {
2172 return -1;
2173 } else {
2174 return 0;
2175 }
2176 }
2177
2178 /*
2179 * Takes a bunch of requests and tries to merge them. Returns the number of
2180 * requests that remain after merging.
2181 */
2182 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2183 int num_reqs, MultiwriteCB *mcb)
2184 {
2185 int i, outidx;
2186
2187 // Sort requests by start sector
2188 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2189
2190 // Check if adjacent requests touch the same clusters. If so, combine them,
2191 // filling up gaps with zero sectors.
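// For example, a request for sectors [0, 8) followed by one for [8, 16) is
// collapsed into a single 16-sector request. If the second request covered
// [12, 20) instead and the driver's bdrv_merge_requests callback allows it,
// the gap [8, 12) is covered by a zero-filled bounce buffer inserted between
// the two original qiovs.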
2192 outidx = 0;
2193 for (i = 1; i < num_reqs; i++) {
2194 int merge = 0;
2195 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2196
2197 // This handles the cases that are valid for all block drivers, namely
2198 // exactly sequential writes and overlapping writes.
2199 if (reqs[i].sector <= oldreq_last) {
2200 merge = 1;
2201 }
2202
2203 // The block driver may decide that it makes sense to combine requests
2204 // even if there is a gap of some sectors between them. In this case,
2205 // the gap is filled with zeros (and is therefore only applicable to
2206 // space that is not yet allocated in formats like qcow2).
2207 if (!merge && bs->drv->bdrv_merge_requests) {
2208 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2209 }
2210
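// Never merge if the combined vector could exceed IOV_MAX; the "+ 1"
// accounts for the zero-fill buffer that may be added below.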
2211 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2212 merge = 0;
2213 }
2214
2215 if (merge) {
2216 size_t size;
2217 QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));
2218 qemu_iovec_init(qiov,
2219 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2220
2221 // Add the first request to the merged one. If the requests are
2222 // overlapping, drop the last sectors of the first request.
2223 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2224 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2225
2226 // We might need to add some zeros between the two requests
2227 if (reqs[i].sector > oldreq_last) {
2228 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2229 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2230 memset(buf, 0, zero_bytes);
2231 qemu_iovec_add(qiov, buf, zero_bytes);
2232 mcb->callbacks[i].free_buf = buf;
2233 }
2234
2235 // Add the second request
2236 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2237
2238 reqs[outidx].nb_sectors = qiov->size >> 9;
2239 reqs[outidx].qiov = qiov;
2240
2241 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2242 } else {
2243 outidx++;
2244 reqs[outidx].sector = reqs[i].sector;
2245 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2246 reqs[outidx].qiov = reqs[i].qiov;
2247 }
2248 }
2249
2250 return outidx + 1;
2251 }
2252
2253 /*
2254 * Submit multiple AIO write requests at once.
2255 *
2256 * On success, the function returns 0 and all requests in the reqs array have
2257 * been submitted. On error, this function returns -1, and any number of the
2258 * requests may or may not have been submitted yet. In particular, this means
2259 * that the callback will be called for some of the requests but not for
2260 * others. The caller must check the error field of each BlockRequest to know
2261 * which callbacks to wait for (if error != 0, no callback will be called).
2262 *
2263 * The implementation may modify the contents of the reqs array, e.g. to merge
2264 * requests. However, the fields opaque and error are left unmodified as they
2265 * are used to signal failure for a single request to the caller.
2266 */
2267 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2268 {
2269 BlockDriverAIOCB *acb;
2270 MultiwriteCB *mcb;
2271 int i;
2272
2273 if (num_reqs == 0) {
2274 return 0;
2275 }
2276
2277 // Create MultiwriteCB structure
2278 mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2279 mcb->num_requests = 0;
2280 mcb->num_callbacks = num_reqs;
2281
2282 for (i = 0; i < num_reqs; i++) {
2283 mcb->callbacks[i].cb = reqs[i].cb;
2284 mcb->callbacks[i].opaque = reqs[i].opaque;
2285 }
2286
2287 // Check for mergeable requests
2288 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2289
2290 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2291
2292 /*
2293 * Run the aio requests. As soon as one request can't be submitted
2294 * successfully, fail all requests that are not yet submitted (we must
2295 * return failure for all requests anyway)
2296 *
2297 * num_requests cannot be set to the right value immediately: If
2298 * bdrv_aio_writev fails for some request, num_requests would be too high
2299 * and therefore multiwrite_cb() would never recognize the multiwrite
2300 * request as completed. We also cannot use the loop variable i to set it
2301 * when the first request fails because the callback may already have been
2302 * called for previously submitted requests. Thus, num_requests must be
2303 * incremented for each request that is submitted.
2304 *
2305 * The problem that callbacks may be called early also means that we need
2306 * to take care that num_requests doesn't become 0 before all requests are
2307 * submitted; otherwise multiwrite_cb() would consider the multiwrite
2308 * request completed prematurely. A dummy request that is "completed" by a
2309 * manual call to multiwrite_cb() takes care of this.
2310 */
2311 mcb->num_requests = 1;
2312
2313 // Run the aio requests
2314 for (i = 0; i < num_reqs; i++) {
2315 mcb->num_requests++;
2316 acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2317 reqs[i].nb_sectors, multiwrite_cb, mcb);
2318
2319 if (acb == NULL) {
2320 // We can only fail the whole thing if no request has been
2321 // submitted yet. Otherwise we'll wait for the submitted AIOs to
2322 // complete and report the error in the callback.
2323 if (i == 0) {
2324 trace_bdrv_aio_multiwrite_earlyfail(mcb);
2325 goto fail;
2326 } else {
2327 trace_bdrv_aio_multiwrite_latefail(mcb, i);
2328 multiwrite_cb(mcb, -EIO);
2329 break;
2330 }
2331 }
2332 }
2333
2334 /* Complete the dummy request */
2335 multiwrite_cb(mcb, 0);
2336
2337 return 0;
2338
2339 fail:
2340 for (i = 0; i < mcb->num_callbacks; i++) {
2341 reqs[i].error = -EIO;
2342 }
2343 qemu_free(mcb);
2344 return -1;
2345 }
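/*
 * A minimal caller sketch (hypothetical, for illustration only; my_cb, ctx0,
 * ctx1 and the qiovs are placeholders). The reqs array must stay valid until
 * either the callbacks have run or error has been set for a request:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = qiov0,
 *           .cb = my_cb, .opaque = ctx0 },
 *         { .sector = 16, .nb_sectors = 8, .qiov = qiov1,
 *           .cb = my_cb, .opaque = ctx1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // requests with reqs[i].error != 0 will not get a callback
 *     }
 */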
2346
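/* With BDRV_O_NO_FLUSH the flush is turned into a no-op that still completes
 * through a bottom half, so the caller gets its callback as usual. */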
2347 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2348 BlockDriverCompletionFunc *cb, void *opaque)
2349 {
2350 BlockDriver *drv = bs->drv;
2351
2352 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2353 return bdrv_aio_noop_em(bs, cb, opaque);
2354 }
2355
2356 if (!drv)
2357 return NULL;
2358 return drv->bdrv_aio_flush(bs, cb, opaque);
2359 }
2360
2361 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2362 {
2363 acb->pool->cancel(acb);
2364 }
2365
2366
2367 /**************************************************************/
2368 /* async block device emulation */
2369
2370 typedef struct BlockDriverAIOCBSync {
2371 BlockDriverAIOCB common;
2372 QEMUBH *bh;
2373 int ret;
2374 /* vector translation state */
2375 QEMUIOVector *qiov;
2376 uint8_t *bounce;
2377 int is_write;
2378 } BlockDriverAIOCBSync;
2379
2380 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
2381 {
2382 BlockDriverAIOCBSync *acb =
2383 container_of(blockacb, BlockDriverAIOCBSync, common);
2384 qemu_bh_delete(acb->bh);
2385 acb->bh = NULL;
2386 qemu_aio_release(acb);
2387 }
2388
2389 static AIOPool bdrv_em_aio_pool = {
2390 .aiocb_size = sizeof(BlockDriverAIOCBSync),
2391 .cancel = bdrv_aio_cancel_em,
2392 };
2393
2394 static void bdrv_aio_bh_cb(void *opaque)
2395 {
2396 BlockDriverAIOCBSync *acb = opaque;
2397
2398 if (!acb->is_write)
2399 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
2400 qemu_vfree(acb->bounce);
2401 acb->common.cb(acb->common.opaque, acb->ret);
2402 qemu_bh_delete(acb->bh);
2403 acb->bh = NULL;
2404 qemu_aio_release(acb);
2405 }
2406
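/*
 * Emulate an asynchronous request on top of the synchronous bdrv_read()/
 * bdrv_write() calls: the scattered qiov is flattened into a contiguous
 * bounce buffer, the synchronous I/O is performed immediately, and a bottom
 * half is scheduled so that the completion callback still runs from the
 * event loop rather than from the caller's stack.
 */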
2407 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2408 int64_t sector_num,
2409 QEMUIOVector *qiov,
2410 int nb_sectors,
2411 BlockDriverCompletionFunc *cb,
2412 void *opaque,
2413 int is_write)
2414
2415 {
2416 BlockDriverAIOCBSync *acb;
2417
2418 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2419 acb->is_write = is_write;
2420 acb->qiov = qiov;
2421 acb->bounce = qemu_blockalign(bs, qiov->size);
2422
2423 if (!acb->bh)
2424 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2425
2426 if (is_write) {
2427 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
2428 acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2429 } else {
2430 acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2431 }
2432
2433 qemu_bh_schedule(acb->bh);
2434
2435 return &acb->common;
2436 }
2437
2438 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2439 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2440 BlockDriverCompletionFunc *cb, void *opaque)
2441 {
2442 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2443 }
2444
2445 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2446 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2447 BlockDriverCompletionFunc *cb, void *opaque)
2448 {
2449 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2450 }
2451
2452 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
2453 BlockDriverCompletionFunc *cb, void *opaque)
2454 {
2455 BlockDriverAIOCBSync *acb;
2456
2457 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2458 acb->is_write = 1; /* don't bounce in the completion handler */
2459 acb->qiov = NULL;
2460 acb->bounce = NULL;
2461 acb->ret = 0;
2462
2463 if (!acb->bh)
2464 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2465
2466 bdrv_flush(bs);
2467 qemu_bh_schedule(acb->bh);
2468 return &acb->common;
2469 }
2470
2471 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
2472 BlockDriverCompletionFunc *cb, void *opaque)
2473 {
2474 BlockDriverAIOCBSync *acb;
2475
2476 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2477 acb->is_write = 1; /* don't bounce in the completion handler */
2478 acb->qiov = NULL;
2479 acb->bounce = NULL;
2480 acb->ret = 0;
2481
2482 if (!acb->bh) {
2483 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2484 }
2485
2486 qemu_bh_schedule(acb->bh);
2487 return &acb->common;
2488 }
2489
2490 /**************************************************************/
2491 /* sync block device emulation */
2492
2493 static void bdrv_rw_em_cb(void *opaque, int ret)
2494 {
2495 *(int *)opaque = ret;
2496 }
2497
2498 #define NOT_DONE 0x7fffffff
2499
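/*
 * Emulate synchronous reads/writes on top of the AIO interface: submit the
 * request with a callback that stores the result in async_ret, then spin in
 * qemu_aio_wait() until it changes. NOT_DONE is INT_MAX, a sentinel that a
 * real completion status is not expected to produce.
 */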
2500 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
2501 uint8_t *buf, int nb_sectors)
2502 {
2503 int async_ret;
2504 BlockDriverAIOCB *acb;
2505 struct iovec iov;
2506 QEMUIOVector qiov;
2507
2508 async_context_push();
2509
2510 async_ret = NOT_DONE;
2511 iov.iov_base = (void *)buf;
2512 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2513 qemu_iovec_init_external(&qiov, &iov, 1);
2514 acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
2515 bdrv_rw_em_cb, &async_ret);
2516 if (acb == NULL) {
2517 async_ret = -1;
2518 goto fail;
2519 }
2520
2521 while (async_ret == NOT_DONE) {
2522 qemu_aio_wait();
2523 }
2524
2525
2526 fail:
2527 async_context_pop();
2528 return async_ret;
2529 }
2530
2531 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
2532 const uint8_t *buf, int nb_sectors)
2533 {
2534 int async_ret;
2535 BlockDriverAIOCB *acb;
2536 struct iovec iov;
2537 QEMUIOVector qiov;
2538
2539 async_context_push();
2540
2541 async_ret = NOT_DONE;
2542 iov.iov_base = (void *)buf;
2543 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2544 qemu_iovec_init_external(&qiov, &iov, 1);
2545 acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
2546 bdrv_rw_em_cb, &async_ret);
2547 if (acb == NULL) {
2548 async_ret = -1;
2549 goto fail;
2550 }
2551 while (async_ret == NOT_DONE) {
2552 qemu_aio_wait();
2553 }
2554
2555 fail:
2556 async_context_pop();
2557 return async_ret;
2558 }
2559
2560 void bdrv_init(void)
2561 {
2562 module_call_init(MODULE_INIT_BLOCK);
2563 }
2564
2565 void bdrv_init_with_whitelist(void)
2566 {
2567 use_bdrv_whitelist = 1;
2568 bdrv_init();
2569 }
2570
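/*
 * AIOCBs are recycled through a per-pool singly linked free list:
 * qemu_aio_get() reuses a previously released AIOCB when one is available and
 * only falls back to qemu_mallocz() otherwise (which is also the only place
 * acb->pool is set, since it stays valid across reuse); qemu_aio_release()
 * pushes the AIOCB back onto the list.
 */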
2571 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
2572 BlockDriverCompletionFunc *cb, void *opaque)
2573 {
2574 BlockDriverAIOCB *acb;
2575
2576 if (pool->free_aiocb) {
2577 acb = pool->free_aiocb;
2578 pool->free_aiocb = acb->next;
2579 } else {
2580 acb = qemu_mallocz(pool->aiocb_size);
2581 acb->pool = pool;
2582 }
2583 acb->bs = bs;
2584 acb->cb = cb;
2585 acb->opaque = opaque;
2586 return acb;
2587 }
2588
2589 void qemu_aio_release(void *p)
2590 {
2591 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
2592 AIOPool *pool = acb->pool;
2593 acb->next = pool->free_aiocb;
2594 pool->free_aiocb = acb;
2595 }
2596
2597 /**************************************************************/
2598 /* removable device support */
2599
2600 /**
2601 * Return TRUE if the media is present
2602 */
2603 int bdrv_is_inserted(BlockDriverState *bs)
2604 {
2605 BlockDriver *drv = bs->drv;
2606 int ret;
2607 if (!drv)
2608 return 0;
2609 if (!drv->bdrv_is_inserted)
2610 return !bs->tray_open;
2611 ret = drv->bdrv_is_inserted(bs);
2612 return ret;
2613 }
2614
2615 /**
2616 * Return TRUE if the media changed since the last call to this
2617 * function. It is currently only used for floppy disks.
2618 */
2619 int bdrv_media_changed(BlockDriverState *bs)
2620 {
2621 BlockDriver *drv = bs->drv;
2622 int ret;
2623
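/* Fall back to the generic media_changed flag when the driver does not
 * implement the hook; the flag is one-shot and is reset by this call. */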
2624 if (!drv || !drv->bdrv_media_changed)
2625 ret = -ENOTSUP;
2626 else
2627 ret = drv->bdrv_media_changed(bs);
2628 if (ret == -ENOTSUP)
2629 ret = bs->media_changed;
2630 bs->media_changed = 0;
2631 return ret;
2632 }
2633
2634 /**
2635 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
2636 */
2637 int bdrv_eject(BlockDriverState *bs, int eject_flag)
2638 {
2639 BlockDriver *drv = bs->drv;
2640 int ret;
2641
2642 if (bs->locked) {
2643 return -EBUSY;
2644 }
2645
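/* Drivers without a bdrv_eject hook report -ENOTSUP, which is treated as
 * success below so that only the tray state is tracked. */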
2646 if (!drv || !drv->bdrv_eject) {
2647 ret = -ENOTSUP;
2648 } else {
2649 ret = drv->bdrv_eject(bs, eject_flag);
2650 }
2651 if (ret == -ENOTSUP) {
2652 ret = 0;
2653 }
2654 if (ret >= 0) {
2655 bs->tray_open = eject_flag;
2656 }
2657
2658 return ret;
2659 }
2660
2661 int bdrv_is_locked(BlockDriverState *bs)
2662 {
2663 return bs->locked;
2664 }
2665
2666 /**
2667 * Lock or unlock the media (if it is locked, the user won't be able
2668 * to eject it manually).
2669 */
2670 void bdrv_set_locked(BlockDriverState *bs, int locked)
2671 {
2672 BlockDriver *drv = bs->drv;
2673
2674 bs->locked = locked;
2675 if (drv && drv->bdrv_set_locked) {
2676 drv->bdrv_set_locked(bs, locked);
2677 }
2678 }
2679
2680 /* needed for generic scsi interface */
2681
2682 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2683 {
2684 BlockDriver *drv = bs->drv;
2685
2686 if (drv && drv->bdrv_ioctl)
2687 return drv->bdrv_ioctl(bs, req, buf);
2688 return -ENOTSUP;
2689 }
2690
2691 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2692 unsigned long int req, void *buf,
2693 BlockDriverCompletionFunc *cb, void *opaque)
2694 {
2695 BlockDriver *drv = bs->drv;
2696
2697 if (drv && drv->bdrv_aio_ioctl)
2698 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
2699 return NULL;
2700 }
2701
2702
2703
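/* Allocate a buffer aligned to the device's buffer_alignment (512 bytes if
 * none is set), suitable for drivers that require aligned I/O buffers, e.g.
 * for O_DIRECT-style access. */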
2704 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2705 {
2706 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
2707 }
2708
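/*
 * Dirty tracking uses one bit per BDRV_SECTORS_PER_DIRTY_CHUNK sectors. The
 * allocation below computes ceil(nb_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK *
 * 8)) bytes, i.e. one byte of bitmap for every eight chunks, rounding up so
 * that a partial final chunk is still covered.
 */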
2709 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
2710 {
2711 int64_t bitmap_size;
2712
2713 bs->dirty_count = 0;
2714 if (enable) {
2715 if (!bs->dirty_bitmap) {
2716 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
2717 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
2718 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
2719
2720 bs->dirty_bitmap = qemu_mallocz(bitmap_size);
2721 }
2722 } else {
2723 if (bs->dirty_bitmap) {
2724 qemu_free(bs->dirty_bitmap);
2725 bs->dirty_bitmap = NULL;
2726 }
2727 }
2728 }
2729
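/* The bitmap is an array of unsigned long: the chunk number divided by the
 * number of bits per long selects the word, the remainder selects the bit. */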
2730 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
2731 {
2732 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
2733
2734 if (bs->dirty_bitmap &&
2735 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
2736 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
2737 (1UL << (chunk % (sizeof(unsigned long) * 8))));
2738 } else {
2739 return 0;
2740 }
2741 }
2742
2743 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
2744 int nr_sectors)
2745 {
2746 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
2747 }
2748
2749 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
2750 {
2751 return bs->dirty_count;
2752 }