mirror_qemu.git / block.c
block: wait_for_overlapping_requests() deadlock detection
1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor.h"
28 #include "block_int.h"
29 #include "module.h"
30 #include "qjson.h"
31 #include "qemu-coroutine.h"
32 #include "qmp-commands.h"
33 #include "qemu-timer.h"
34
35 #ifdef CONFIG_BSD
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/ioctl.h>
39 #include <sys/queue.h>
40 #ifndef __DragonFly__
41 #include <sys/disk.h>
42 #endif
43 #endif
44
45 #ifdef _WIN32
46 #include <windows.h>
47 #endif
48
49 #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
50
51 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
52 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
53 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
54 BlockDriverCompletionFunc *cb, void *opaque);
55 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
56 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
57 BlockDriverCompletionFunc *cb, void *opaque);
58 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
59 int64_t sector_num, int nb_sectors,
60 QEMUIOVector *iov);
61 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
62 int64_t sector_num, int nb_sectors,
63 QEMUIOVector *iov);
64 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
65 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
66 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
67 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
68 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
69 int64_t sector_num,
70 QEMUIOVector *qiov,
71 int nb_sectors,
72 BlockDriverCompletionFunc *cb,
73 void *opaque,
74 bool is_write);
75 static void coroutine_fn bdrv_co_do_rw(void *opaque);
76
77 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
78 bool is_write, double elapsed_time, uint64_t *wait);
79 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
80 double elapsed_time, uint64_t *wait);
81 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
82 bool is_write, int64_t *wait);
83
84 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
85 QTAILQ_HEAD_INITIALIZER(bdrv_states);
86
87 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
88 QLIST_HEAD_INITIALIZER(bdrv_drivers);
89
90 /* The device to use for VM snapshots */
91 static BlockDriverState *bs_snapshots;
92
93 /* If non-zero, use only whitelisted block drivers */
94 static int use_bdrv_whitelist;
95
96 #ifdef _WIN32
97 static int is_windows_drive_prefix(const char *filename)
98 {
99 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
100 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
101 filename[1] == ':');
102 }
103
104 int is_windows_drive(const char *filename)
105 {
106 if (is_windows_drive_prefix(filename) &&
107 filename[2] == '\0')
108 return 1;
109 if (strstart(filename, "\\\\.\\", NULL) ||
110 strstart(filename, "//./", NULL))
111 return 1;
112 return 0;
113 }
114 #endif
115
116 /* throttling disk I/O limits */
117 void bdrv_io_limits_disable(BlockDriverState *bs)
118 {
119 bs->io_limits_enabled = false;
120
121 while (qemu_co_queue_next(&bs->throttled_reqs));
122
123 if (bs->block_timer) {
124 qemu_del_timer(bs->block_timer);
125 qemu_free_timer(bs->block_timer);
126 bs->block_timer = NULL;
127 }
128
129 bs->slice_start = 0;
130 bs->slice_end = 0;
131 bs->slice_time = 0;
132 memset(&bs->io_base, 0, sizeof(bs->io_base));
133 }
134
135 static void bdrv_block_timer(void *opaque)
136 {
137 BlockDriverState *bs = opaque;
138
139 qemu_co_queue_next(&bs->throttled_reqs);
140 }
141
142 void bdrv_io_limits_enable(BlockDriverState *bs)
143 {
144 qemu_co_queue_init(&bs->throttled_reqs);
145 bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
146 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
147 bs->slice_start = qemu_get_clock_ns(vm_clock);
148 bs->slice_end = bs->slice_start + bs->slice_time;
149 memset(&bs->io_base, 0, sizeof(bs->io_base));
150 bs->io_limits_enabled = true;
151 }
152
153 bool bdrv_io_limits_enabled(BlockDriverState *bs)
154 {
155 BlockIOLimit *io_limits = &bs->io_limits;
156 return io_limits->bps[BLOCK_IO_LIMIT_READ]
157 || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
158 || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
159 || io_limits->iops[BLOCK_IO_LIMIT_READ]
160 || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
161 || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
162 }
163
164 static void bdrv_io_limits_intercept(BlockDriverState *bs,
165 bool is_write, int nb_sectors)
166 {
167 int64_t wait_time = -1;
168
169 if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
170 qemu_co_queue_wait(&bs->throttled_reqs);
171 }
172
173 /* We aim to preserve each request's ordering (FIFO). The next throttled
174 * request is not dequeued until the current request is allowed to be
175 * serviced. So if the current request still exceeds the limits, it is
176 * re-inserted at the head of the queue, and all requests that follow it
177 * remain in the throttled_reqs queue.
178 */
179
180 while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
181 qemu_mod_timer(bs->block_timer,
182 wait_time + qemu_get_clock_ns(vm_clock));
183 qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
184 }
185
186 qemu_co_queue_next(&bs->throttled_reqs);
187 }
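/*
 * A sketch of the request flow through the throttling gate above, for
 * illustration (the callers are bdrv_co_do_readv()/bdrv_co_do_writev()
 * further down in this file):
 *
 *   1. If other requests are already throttled, park behind them so FIFO
 *      order is kept.
 *   2. While bdrv_exceed_io_limits() says the request would exceed the
 *      configured bps/iops limits, arm bs->block_timer for the computed
 *      wait_time and sleep at the head of throttled_reqs; the timer
 *      callback (bdrv_block_timer) wakes the head request up again.
 *   3. Once the request fits within the limits, kick the next queued
 *      request so the queue keeps draining.
 */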
188
189 /* check if the path starts with "<protocol>:" */
190 static int path_has_protocol(const char *path)
191 {
192 #ifdef _WIN32
193 if (is_windows_drive(path) ||
194 is_windows_drive_prefix(path)) {
195 return 0;
196 }
197 #endif
198
199 return strchr(path, ':') != NULL;
200 }
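/*
 * Examples (illustrative only): "nbd:localhost:10809" and "file:/tmp/x.img"
 * contain a protocol prefix; "/tmp/x.img" does not. On Windows, "c:" and
 * "c:\foo.img" are treated as drive specifications rather than protocols,
 * which is why the drive-prefix check above must come first.
 */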
201
202 int path_is_absolute(const char *path)
203 {
204 const char *p;
205 #ifdef _WIN32
206 /* specific case for names like: "\\.\d:" */
207 if (*path == '/' || *path == '\\')
208 return 1;
209 #endif
210 p = strchr(path, ':');
211 if (p)
212 p++;
213 else
214 p = path;
215 #ifdef _WIN32
216 return (*p == '/' || *p == '\\');
217 #else
218 return (*p == '/');
219 #endif
220 }
221
222 /* if filename is absolute, just copy it to dest. Otherwise, build a
223 path to it by treating it as relative to base_path. URLs are
224 supported. */
225 void path_combine(char *dest, int dest_size,
226 const char *base_path,
227 const char *filename)
228 {
229 const char *p, *p1;
230 int len;
231
232 if (dest_size <= 0)
233 return;
234 if (path_is_absolute(filename)) {
235 pstrcpy(dest, dest_size, filename);
236 } else {
237 p = strchr(base_path, ':');
238 if (p)
239 p++;
240 else
241 p = base_path;
242 p1 = strrchr(base_path, '/');
243 #ifdef _WIN32
244 {
245 const char *p2;
246 p2 = strrchr(base_path, '\\');
247 if (!p1 || p2 > p1)
248 p1 = p2;
249 }
250 #endif
251 if (p1)
252 p1++;
253 else
254 p1 = base_path;
255 if (p1 > p)
256 p = p1;
257 len = p - base_path;
258 if (len > dest_size - 1)
259 len = dest_size - 1;
260 memcpy(dest, base_path, len);
261 dest[len] = '\0';
262 pstrcat(dest, dest_size, filename);
263 }
264 }
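/*
 * Worked examples (illustrative):
 *
 *   path_combine(dest, n, "/img/base.qcow2", "backing.qcow2")
 *       => "/img/backing.qcow2"         (relative to the base image's dir)
 *   path_combine(dest, n, "http://host/dir/base.img", "other.img")
 *       => "http://host/dir/other.img"  (URLs keep their prefix)
 *   path_combine(dest, n, "/img/base.qcow2", "/abs/backing.qcow2")
 *       => "/abs/backing.qcow2"         (absolute names are copied verbatim)
 */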
265
266 void bdrv_register(BlockDriver *bdrv)
267 {
268 /* Block drivers without coroutine functions need emulation */
269 if (!bdrv->bdrv_co_readv) {
270 bdrv->bdrv_co_readv = bdrv_co_readv_em;
271 bdrv->bdrv_co_writev = bdrv_co_writev_em;
272
273 /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
274 * the block driver lacks aio we need to emulate that too.
275 */
276 if (!bdrv->bdrv_aio_readv) {
277 /* add AIO emulation layer */
278 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
279 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
280 }
281 }
282
283 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
284 }
285
286 /* create a new block device (by default it is empty) */
287 BlockDriverState *bdrv_new(const char *device_name)
288 {
289 BlockDriverState *bs;
290
291 bs = g_malloc0(sizeof(BlockDriverState));
292 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
293 if (device_name[0] != '\0') {
294 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
295 }
296 bdrv_iostatus_disable(bs);
297 return bs;
298 }
299
300 BlockDriver *bdrv_find_format(const char *format_name)
301 {
302 BlockDriver *drv1;
303 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
304 if (!strcmp(drv1->format_name, format_name)) {
305 return drv1;
306 }
307 }
308 return NULL;
309 }
310
311 static int bdrv_is_whitelisted(BlockDriver *drv)
312 {
313 static const char *whitelist[] = {
314 CONFIG_BDRV_WHITELIST
315 };
316 const char **p;
317
318 if (!whitelist[0])
319 return 1; /* no whitelist, anything goes */
320
321 for (p = whitelist; *p; p++) {
322 if (!strcmp(drv->format_name, *p)) {
323 return 1;
324 }
325 }
326 return 0;
327 }
328
329 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
330 {
331 BlockDriver *drv = bdrv_find_format(format_name);
332 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
333 }
334
335 int bdrv_create(BlockDriver *drv, const char* filename,
336 QEMUOptionParameter *options)
337 {
338 if (!drv->bdrv_create)
339 return -ENOTSUP;
340
341 return drv->bdrv_create(filename, options);
342 }
343
344 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
345 {
346 BlockDriver *drv;
347
348 drv = bdrv_find_protocol(filename);
349 if (drv == NULL) {
350 return -ENOENT;
351 }
352
353 return bdrv_create(drv, filename, options);
354 }
355
356 #ifdef _WIN32
357 void get_tmp_filename(char *filename, int size)
358 {
359 char temp_dir[MAX_PATH];
360
361 GetTempPath(MAX_PATH, temp_dir);
362 GetTempFileName(temp_dir, "qem", 0, filename);
363 }
364 #else
365 void get_tmp_filename(char *filename, int size)
366 {
367 int fd;
368 const char *tmpdir;
369 /* XXX: race condition possible */
370 tmpdir = getenv("TMPDIR");
371 if (!tmpdir)
372 tmpdir = "/tmp";
373 snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
374 fd = mkstemp(filename);
375 close(fd);
376 }
377 #endif
378
379 /*
380 * Detect host devices. By convention, /dev/cdrom[N] is always
381 * recognized as a host CDROM.
382 */
383 static BlockDriver *find_hdev_driver(const char *filename)
384 {
385 int score_max = 0, score;
386 BlockDriver *drv = NULL, *d;
387
388 QLIST_FOREACH(d, &bdrv_drivers, list) {
389 if (d->bdrv_probe_device) {
390 score = d->bdrv_probe_device(filename);
391 if (score > score_max) {
392 score_max = score;
393 drv = d;
394 }
395 }
396 }
397
398 return drv;
399 }
400
401 BlockDriver *bdrv_find_protocol(const char *filename)
402 {
403 BlockDriver *drv1;
404 char protocol[128];
405 int len;
406 const char *p;
407
408 /* TODO Drivers without bdrv_file_open must be specified explicitly */
409
410 /*
411 * XXX(hch): we really should not let host device detection
412 * override an explicit protocol specification, but moving this
413 * later breaks access to device names with colons in them.
414 * Thanks to the brain-dead persistent naming schemes on udev-
415 * based Linux systems those actually are quite common.
416 */
417 drv1 = find_hdev_driver(filename);
418 if (drv1) {
419 return drv1;
420 }
421
422 if (!path_has_protocol(filename)) {
423 return bdrv_find_format("file");
424 }
425 p = strchr(filename, ':');
426 assert(p != NULL);
427 len = p - filename;
428 if (len > sizeof(protocol) - 1)
429 len = sizeof(protocol) - 1;
430 memcpy(protocol, filename, len);
431 protocol[len] = '\0';
432 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
433 if (drv1->protocol_name &&
434 !strcmp(drv1->protocol_name, protocol)) {
435 return drv1;
436 }
437 }
438 return NULL;
439 }
440
441 static int find_image_format(const char *filename, BlockDriver **pdrv)
442 {
443 int ret, score, score_max;
444 BlockDriver *drv1, *drv;
445 uint8_t buf[2048];
446 BlockDriverState *bs;
447
448 ret = bdrv_file_open(&bs, filename, 0);
449 if (ret < 0) {
450 *pdrv = NULL;
451 return ret;
452 }
453
454 /* Return the raw BlockDriver for scsi-generic devices or empty drives */
455 if (bs->sg || !bdrv_is_inserted(bs)) {
456 bdrv_delete(bs);
457 drv = bdrv_find_format("raw");
458 if (!drv) {
459 ret = -ENOENT;
460 }
461 *pdrv = drv;
462 return ret;
463 }
464
465 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
466 bdrv_delete(bs);
467 if (ret < 0) {
468 *pdrv = NULL;
469 return ret;
470 }
471
472 score_max = 0;
473 drv = NULL;
474 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
475 if (drv1->bdrv_probe) {
476 score = drv1->bdrv_probe(buf, ret, filename);
477 if (score > score_max) {
478 score_max = score;
479 drv = drv1;
480 }
481 }
482 }
483 if (!drv) {
484 ret = -ENOENT;
485 }
486 *pdrv = drv;
487 return ret;
488 }
489
490 /**
491 * Set the current 'total_sectors' value
492 */
493 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
494 {
495 BlockDriver *drv = bs->drv;
496
497 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
498 if (bs->sg)
499 return 0;
500
501 /* query actual device if possible, otherwise just trust the hint */
502 if (drv->bdrv_getlength) {
503 int64_t length = drv->bdrv_getlength(bs);
504 if (length < 0) {
505 return length;
506 }
507 hint = length >> BDRV_SECTOR_BITS;
508 }
509
510 bs->total_sectors = hint;
511 return 0;
512 }
513
514 /**
515 * Set open flags for a given cache mode
516 *
517 * Return 0 on success, -1 if the cache mode was invalid.
518 */
519 int bdrv_parse_cache_flags(const char *mode, int *flags)
520 {
521 *flags &= ~BDRV_O_CACHE_MASK;
522
523 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
524 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
525 } else if (!strcmp(mode, "directsync")) {
526 *flags |= BDRV_O_NOCACHE;
527 } else if (!strcmp(mode, "writeback")) {
528 *flags |= BDRV_O_CACHE_WB;
529 } else if (!strcmp(mode, "unsafe")) {
530 *flags |= BDRV_O_CACHE_WB;
531 *flags |= BDRV_O_NO_FLUSH;
532 } else if (!strcmp(mode, "writethrough")) {
533 /* this is the default */
534 } else {
535 return -1;
536 }
537
538 return 0;
539 }
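/*
 * Usage sketch (hypothetical caller), showing the resulting cache flags:
 *
 *   int flags = BDRV_O_RDWR;
 *   if (bdrv_parse_cache_flags("none", &flags) < 0) {
 *       ... report invalid cache mode ...
 *   }
 *   // flags == BDRV_O_RDWR | BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *
 * Mode summary: "writethrough" sets no cache flags (the default),
 * "writeback" sets BDRV_O_CACHE_WB, "directsync" sets BDRV_O_NOCACHE,
 * "none"/"off" set both, and "unsafe" adds BDRV_O_NO_FLUSH on top of
 * BDRV_O_CACHE_WB.
 */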
540
541 /**
542 * The copy-on-read flag is actually a reference count so multiple users may
543 * use the feature without worrying about clobbering its previous state.
544 * Copy-on-read stays enabled until every user has disabled it again.
545 */
546 void bdrv_enable_copy_on_read(BlockDriverState *bs)
547 {
548 bs->copy_on_read++;
549 }
550
551 void bdrv_disable_copy_on_read(BlockDriverState *bs)
552 {
553 assert(bs->copy_on_read > 0);
554 bs->copy_on_read--;
555 }
556
557 /*
558 * Common part for opening disk images and files
559 */
560 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
561 int flags, BlockDriver *drv)
562 {
563 int ret, open_flags;
564
565 assert(drv != NULL);
566
567 trace_bdrv_open_common(bs, filename, flags, drv->format_name);
568
569 bs->file = NULL;
570 bs->total_sectors = 0;
571 bs->encrypted = 0;
572 bs->valid_key = 0;
573 bs->sg = 0;
574 bs->open_flags = flags;
575 bs->growable = 0;
576 bs->buffer_alignment = 512;
577
578 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
579 if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
580 bdrv_enable_copy_on_read(bs);
581 }
582
583 pstrcpy(bs->filename, sizeof(bs->filename), filename);
584 bs->backing_file[0] = '\0';
585
586 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
587 return -ENOTSUP;
588 }
589
590 bs->drv = drv;
591 bs->opaque = g_malloc0(drv->instance_size);
592
593 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
594
595 /*
596 * Clear flags that are internal to the block layer before opening the
597 * image.
598 */
599 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
600
601 /*
602 * Snapshots should be writable.
603 */
604 if (bs->is_temporary) {
605 open_flags |= BDRV_O_RDWR;
606 }
607
608 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
609
610 /* Open the image, either directly or using a protocol */
611 if (drv->bdrv_file_open) {
612 ret = drv->bdrv_file_open(bs, filename, open_flags);
613 } else {
614 ret = bdrv_file_open(&bs->file, filename, open_flags);
615 if (ret >= 0) {
616 ret = drv->bdrv_open(bs, open_flags);
617 }
618 }
619
620 if (ret < 0) {
621 goto free_and_fail;
622 }
623
624 ret = refresh_total_sectors(bs, bs->total_sectors);
625 if (ret < 0) {
626 goto free_and_fail;
627 }
628
629 #ifndef _WIN32
630 if (bs->is_temporary) {
631 unlink(filename);
632 }
633 #endif
634 return 0;
635
636 free_and_fail:
637 if (bs->file) {
638 bdrv_delete(bs->file);
639 bs->file = NULL;
640 }
641 g_free(bs->opaque);
642 bs->opaque = NULL;
643 bs->drv = NULL;
644 return ret;
645 }
646
647 /*
648 * Opens a file using a protocol (file, host_device, nbd, ...)
649 */
650 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
651 {
652 BlockDriverState *bs;
653 BlockDriver *drv;
654 int ret;
655
656 drv = bdrv_find_protocol(filename);
657 if (!drv) {
658 return -ENOENT;
659 }
660
661 bs = bdrv_new("");
662 ret = bdrv_open_common(bs, filename, flags, drv);
663 if (ret < 0) {
664 bdrv_delete(bs);
665 return ret;
666 }
667 bs->growable = 1;
668 *pbs = bs;
669 return 0;
670 }
671
672 /*
673 * Opens a disk image (raw, qcow2, vmdk, ...)
674 */
675 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
676 BlockDriver *drv)
677 {
678 int ret;
679 char tmp_filename[PATH_MAX];
680
681 if (flags & BDRV_O_SNAPSHOT) {
682 BlockDriverState *bs1;
683 int64_t total_size;
684 int is_protocol = 0;
685 BlockDriver *bdrv_qcow2;
686 QEMUOptionParameter *options;
687 char backing_filename[PATH_MAX];
688
689 /* if snapshot, we create a temporary backing file and open it
690 instead of opening 'filename' directly */
691
692 /* if there is a backing file, use it */
693 bs1 = bdrv_new("");
694 ret = bdrv_open(bs1, filename, 0, drv);
695 if (ret < 0) {
696 bdrv_delete(bs1);
697 return ret;
698 }
699 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
700
701 if (bs1->drv && bs1->drv->protocol_name)
702 is_protocol = 1;
703
704 bdrv_delete(bs1);
705
706 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
707
708 /* Real path is meaningless for protocols */
709 if (is_protocol)
710 snprintf(backing_filename, sizeof(backing_filename),
711 "%s", filename);
712 else if (!realpath(filename, backing_filename))
713 return -errno;
714
715 bdrv_qcow2 = bdrv_find_format("qcow2");
716 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
717
718 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
719 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
720 if (drv) {
721 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
722 drv->format_name);
723 }
724
725 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
726 free_option_parameters(options);
727 if (ret < 0) {
728 return ret;
729 }
730
731 filename = tmp_filename;
732 drv = bdrv_qcow2;
733 bs->is_temporary = 1;
734 }
735
736 /* Find the right image format driver */
737 if (!drv) {
738 ret = find_image_format(filename, &drv);
739 }
740
741 if (!drv) {
742 goto unlink_and_fail;
743 }
744
745 /* Open the image */
746 ret = bdrv_open_common(bs, filename, flags, drv);
747 if (ret < 0) {
748 goto unlink_and_fail;
749 }
750
751 /* If there is a backing file, use it */
752 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
753 char backing_filename[PATH_MAX];
754 int back_flags;
755 BlockDriver *back_drv = NULL;
756
757 bs->backing_hd = bdrv_new("");
758
759 if (path_has_protocol(bs->backing_file)) {
760 pstrcpy(backing_filename, sizeof(backing_filename),
761 bs->backing_file);
762 } else {
763 path_combine(backing_filename, sizeof(backing_filename),
764 filename, bs->backing_file);
765 }
766
767 if (bs->backing_format[0] != '\0') {
768 back_drv = bdrv_find_format(bs->backing_format);
769 }
770
771 /* backing files are always opened read-only */
772 back_flags =
773 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
774
775 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
776 if (ret < 0) {
777 bdrv_close(bs);
778 return ret;
779 }
780 if (bs->is_temporary) {
781 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
782 } else {
783 /* base image inherits from "parent" */
784 bs->backing_hd->keep_read_only = bs->keep_read_only;
785 }
786 }
787
788 if (!bdrv_key_required(bs)) {
789 bdrv_dev_change_media_cb(bs, true);
790 }
791
792 /* throttling disk I/O limits */
793 if (bs->io_limits_enabled) {
794 bdrv_io_limits_enable(bs);
795 }
796
797 return 0;
798
799 unlink_and_fail:
800 if (bs->is_temporary) {
801 unlink(filename);
802 }
803 return ret;
804 }
805
806 void bdrv_close(BlockDriverState *bs)
807 {
808 if (bs->drv) {
809 if (bs == bs_snapshots) {
810 bs_snapshots = NULL;
811 }
812 if (bs->backing_hd) {
813 bdrv_delete(bs->backing_hd);
814 bs->backing_hd = NULL;
815 }
816 bs->drv->bdrv_close(bs);
817 g_free(bs->opaque);
818 #ifdef _WIN32
819 if (bs->is_temporary) {
820 unlink(bs->filename);
821 }
822 #endif
823 bs->opaque = NULL;
824 bs->drv = NULL;
825 bs->copy_on_read = 0;
826
827 if (bs->file != NULL) {
828 bdrv_close(bs->file);
829 }
830
831 bdrv_dev_change_media_cb(bs, false);
832 }
833
834 /* throttling disk I/O limits */
835 if (bs->io_limits_enabled) {
836 bdrv_io_limits_disable(bs);
837 }
838 }
839
840 void bdrv_close_all(void)
841 {
842 BlockDriverState *bs;
843
844 QTAILQ_FOREACH(bs, &bdrv_states, list) {
845 bdrv_close(bs);
846 }
847 }
848
849 /* Make a BlockDriverState anonymous by removing it from the bdrv_states
850 list. Also, NUL-terminate device_name to prevent a double remove. */
851 void bdrv_make_anon(BlockDriverState *bs)
852 {
853 if (bs->device_name[0] != '\0') {
854 QTAILQ_REMOVE(&bdrv_states, bs, list);
855 }
856 bs->device_name[0] = '\0';
857 }
858
859 void bdrv_delete(BlockDriverState *bs)
860 {
861 assert(!bs->dev);
862
863 /* remove from list, if necessary */
864 bdrv_make_anon(bs);
865
866 bdrv_close(bs);
867 if (bs->file != NULL) {
868 bdrv_delete(bs->file);
869 }
870
871 assert(bs != bs_snapshots);
872 g_free(bs);
873 }
874
875 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
876 /* TODO change to DeviceState *dev when all users are qdevified */
877 {
878 if (bs->dev) {
879 return -EBUSY;
880 }
881 bs->dev = dev;
882 bdrv_iostatus_reset(bs);
883 return 0;
884 }
885
886 /* TODO qdevified devices don't use this, remove when devices are qdevified */
887 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
888 {
889 if (bdrv_attach_dev(bs, dev) < 0) {
890 abort();
891 }
892 }
893
894 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
895 /* TODO change to DeviceState *dev when all users are qdevified */
896 {
897 assert(bs->dev == dev);
898 bs->dev = NULL;
899 bs->dev_ops = NULL;
900 bs->dev_opaque = NULL;
901 bs->buffer_alignment = 512;
902 }
903
904 /* TODO change to return DeviceState * when all users are qdevified */
905 void *bdrv_get_attached_dev(BlockDriverState *bs)
906 {
907 return bs->dev;
908 }
909
910 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
911 void *opaque)
912 {
913 bs->dev_ops = ops;
914 bs->dev_opaque = opaque;
915 if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
916 bs_snapshots = NULL;
917 }
918 }
919
920 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
921 {
922 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
923 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
924 }
925 }
926
927 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
928 {
929 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
930 }
931
932 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
933 {
934 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
935 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
936 }
937 }
938
939 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
940 {
941 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
942 return bs->dev_ops->is_tray_open(bs->dev_opaque);
943 }
944 return false;
945 }
946
947 static void bdrv_dev_resize_cb(BlockDriverState *bs)
948 {
949 if (bs->dev_ops && bs->dev_ops->resize_cb) {
950 bs->dev_ops->resize_cb(bs->dev_opaque);
951 }
952 }
953
954 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
955 {
956 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
957 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
958 }
959 return false;
960 }
961
962 /*
963 * Run consistency checks on an image
964 *
965 * Returns 0 if the check could be completed (it doesn't mean that the image is
966 * free of errors) or -errno when an internal error occurred. The results of the
967 * check are stored in res.
968 */
969 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
970 {
971 if (bs->drv->bdrv_check == NULL) {
972 return -ENOTSUP;
973 }
974
975 memset(res, 0, sizeof(*res));
976 return bs->drv->bdrv_check(bs, res);
977 }
978
979 #define COMMIT_BUF_SECTORS 2048
980
981 /* commit COW file into the raw image */
982 int bdrv_commit(BlockDriverState *bs)
983 {
984 BlockDriver *drv = bs->drv;
985 BlockDriver *backing_drv;
986 int64_t sector, total_sectors;
987 int n, ro, open_flags;
988 int ret = 0, rw_ret = 0;
989 uint8_t *buf;
990 char filename[1024];
991 BlockDriverState *bs_rw, *bs_ro;
992
993 if (!drv)
994 return -ENOMEDIUM;
995
996 if (!bs->backing_hd) {
997 return -ENOTSUP;
998 }
999
1000 if (bs->backing_hd->keep_read_only) {
1001 return -EACCES;
1002 }
1003
1004 backing_drv = bs->backing_hd->drv;
1005 ro = bs->backing_hd->read_only;
1006 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
1007 open_flags = bs->backing_hd->open_flags;
1008
1009 if (ro) {
1010 /* re-open as RW */
1011 bdrv_delete(bs->backing_hd);
1012 bs->backing_hd = NULL;
1013 bs_rw = bdrv_new("");
1014 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
1015 backing_drv);
1016 if (rw_ret < 0) {
1017 bdrv_delete(bs_rw);
1018 /* try to re-open read-only */
1019 bs_ro = bdrv_new("");
1020 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1021 backing_drv);
1022 if (ret < 0) {
1023 bdrv_delete(bs_ro);
1024 /* drive not functional anymore */
1025 bs->drv = NULL;
1026 return ret;
1027 }
1028 bs->backing_hd = bs_ro;
1029 return rw_ret;
1030 }
1031 bs->backing_hd = bs_rw;
1032 }
1033
1034 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
1035 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
1036
1037 for (sector = 0; sector < total_sectors; sector += n) {
1038 if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
1039
1040 if (bdrv_read(bs, sector, buf, n) != 0) {
1041 ret = -EIO;
1042 goto ro_cleanup;
1043 }
1044
1045 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
1046 ret = -EIO;
1047 goto ro_cleanup;
1048 }
1049 }
1050 }
1051
1052 if (drv->bdrv_make_empty) {
1053 ret = drv->bdrv_make_empty(bs);
1054 bdrv_flush(bs);
1055 }
1056
1057 /*
1058 * Make sure all data we wrote to the backing device is actually
1059 * stable on disk.
1060 */
1061 if (bs->backing_hd)
1062 bdrv_flush(bs->backing_hd);
1063
1064 ro_cleanup:
1065 g_free(buf);
1066
1067 if (ro) {
1068 /* re-open as RO */
1069 bdrv_delete(bs->backing_hd);
1070 bs->backing_hd = NULL;
1071 bs_ro = bdrv_new("");
1072 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1073 backing_drv);
1074 if (ret < 0) {
1075 bdrv_delete(bs_ro);
1076 /* drive not functional anymore */
1077 bs->drv = NULL;
1078 return ret;
1079 }
1080 bs->backing_hd = bs_ro;
1081 bs->backing_hd->keep_read_only = 0;
1082 }
1083
1084 return ret;
1085 }
1086
1087 void bdrv_commit_all(void)
1088 {
1089 BlockDriverState *bs;
1090
1091 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1092 bdrv_commit(bs);
1093 }
1094 }
1095
1096 struct BdrvTrackedRequest {
1097 BlockDriverState *bs;
1098 int64_t sector_num;
1099 int nb_sectors;
1100 bool is_write;
1101 QLIST_ENTRY(BdrvTrackedRequest) list;
1102 Coroutine *co; /* owner, used for deadlock detection */
1103 CoQueue wait_queue; /* coroutines blocked on this request */
1104 };
1105
1106 /**
1107 * Remove an active request from the tracked requests list
1108 *
1109 * This function should be called when a tracked request is completing.
1110 */
1111 static void tracked_request_end(BdrvTrackedRequest *req)
1112 {
1113 QLIST_REMOVE(req, list);
1114 qemu_co_queue_restart_all(&req->wait_queue);
1115 }
1116
1117 /**
1118 * Add an active request to the tracked requests list
1119 */
1120 static void tracked_request_begin(BdrvTrackedRequest *req,
1121 BlockDriverState *bs,
1122 int64_t sector_num,
1123 int nb_sectors, bool is_write)
1124 {
1125 *req = (BdrvTrackedRequest){
1126 .bs = bs,
1127 .sector_num = sector_num,
1128 .nb_sectors = nb_sectors,
1129 .is_write = is_write,
1130 .co = qemu_coroutine_self(),
1131 };
1132
1133 qemu_co_queue_init(&req->wait_queue);
1134
1135 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
1136 }
1137
1138 /**
1139 * Round a region to cluster boundaries
1140 */
1141 static void round_to_clusters(BlockDriverState *bs,
1142 int64_t sector_num, int nb_sectors,
1143 int64_t *cluster_sector_num,
1144 int *cluster_nb_sectors)
1145 {
1146 BlockDriverInfo bdi;
1147
1148 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
1149 *cluster_sector_num = sector_num;
1150 *cluster_nb_sectors = nb_sectors;
1151 } else {
1152 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
1153 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
1154 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
1155 nb_sectors, c);
1156 }
1157 }
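/*
 * Worked example: assuming bdrv_get_info() reports a 64 KiB cluster size,
 * c = 65536 / BDRV_SECTOR_SIZE = 128 sectors. A request for sectors
 * [130, 134) is widened to the cluster-aligned region [128, 256):
 *
 *   cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)         = 128
 *   cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 4, 128) = 128
 */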
1158
1159 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
1160 int64_t sector_num, int nb_sectors) {
1161 /* aaaa bbbb */
1162 if (sector_num >= req->sector_num + req->nb_sectors) {
1163 return false;
1164 }
1165 /* bbbb aaaa */
1166 if (req->sector_num >= sector_num + nb_sectors) {
1167 return false;
1168 }
1169 return true;
1170 }
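/*
 * E.g. a req covering sectors [0, 8) overlaps a request for [4, 12)
 * (neither disjointness test above fires), but not one for [8, 16).
 */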
1171
1172 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
1173 int64_t sector_num, int nb_sectors)
1174 {
1175 BdrvTrackedRequest *req;
1176 int64_t cluster_sector_num;
1177 int cluster_nb_sectors;
1178 bool retry;
1179
1180 /* If we touch the same cluster it counts as an overlap. This guarantees
1181 * that allocating writes will be serialized and not race with each other
1182 * for the same cluster. For example, in copy-on-read it ensures that the
1183 * CoR read and write operations are atomic and guest writes cannot
1184 * interleave between them.
1185 */
1186 round_to_clusters(bs, sector_num, nb_sectors,
1187 &cluster_sector_num, &cluster_nb_sectors);
1188
1189 do {
1190 retry = false;
1191 QLIST_FOREACH(req, &bs->tracked_requests, list) {
1192 if (tracked_request_overlaps(req, cluster_sector_num,
1193 cluster_nb_sectors)) {
1194 /* Hitting this means there was a reentrant request, for
1195 * example, a block driver issuing nested requests. This must
1196 * never happen since it means deadlock.
1197 */
1198 assert(qemu_coroutine_self() != req->co);
1199
1200 qemu_co_queue_wait(&req->wait_queue);
1201 retry = true;
1202 break;
1203 }
1204 }
1205 } while (retry);
1206 }
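/*
 * The assert(qemu_coroutine_self() != req->co) above is the deadlock
 * detection this function provides: a request that overlaps a tracked
 * request owned by its *own* coroutine can never be woken, because the
 * wait_queue is only restarted from tracked_request_end() when the owner
 * completes -- and the owner is the one about to sleep. A hypothetical
 * way to hit it would be a block driver whose bdrv_co_readv() re-enters
 * bdrv_co_do_readv() on the same BlockDriverState for an overlapping
 * range while copy-on-read is enabled.
 */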
1207
1208 /*
1209 * Return values:
1210 * 0 - success
1211 * -EINVAL - backing format specified, but no file
1212 * -ENOSPC - can't update the backing file because no space is left in the
1213 * image file header
1214 * -ENOTSUP - format driver doesn't support changing the backing file
1215 */
1216 int bdrv_change_backing_file(BlockDriverState *bs,
1217 const char *backing_file, const char *backing_fmt)
1218 {
1219 BlockDriver *drv = bs->drv;
1220
1221 if (drv->bdrv_change_backing_file != NULL) {
1222 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
1223 } else {
1224 return -ENOTSUP;
1225 }
1226 }
1227
1228 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
1229 size_t size)
1230 {
1231 int64_t len;
1232
1233 if (!bdrv_is_inserted(bs))
1234 return -ENOMEDIUM;
1235
1236 if (bs->growable)
1237 return 0;
1238
1239 len = bdrv_getlength(bs);
1240
1241 if (offset < 0)
1242 return -EIO;
1243
1244 if ((offset > len) || (len - offset < size))
1245 return -EIO;
1246
1247 return 0;
1248 }
1249
1250 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
1251 int nb_sectors)
1252 {
1253 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
1254 nb_sectors * BDRV_SECTOR_SIZE);
1255 }
1256
1257 typedef struct RwCo {
1258 BlockDriverState *bs;
1259 int64_t sector_num;
1260 int nb_sectors;
1261 QEMUIOVector *qiov;
1262 bool is_write;
1263 int ret;
1264 } RwCo;
1265
1266 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
1267 {
1268 RwCo *rwco = opaque;
1269
1270 if (!rwco->is_write) {
1271 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
1272 rwco->nb_sectors, rwco->qiov);
1273 } else {
1274 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
1275 rwco->nb_sectors, rwco->qiov);
1276 }
1277 }
1278
1279 /*
1280 * Process a synchronous request using coroutines
1281 */
1282 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
1283 int nb_sectors, bool is_write)
1284 {
1285 QEMUIOVector qiov;
1286 struct iovec iov = {
1287 .iov_base = (void *)buf,
1288 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1289 };
1290 Coroutine *co;
1291 RwCo rwco = {
1292 .bs = bs,
1293 .sector_num = sector_num,
1294 .nb_sectors = nb_sectors,
1295 .qiov = &qiov,
1296 .is_write = is_write,
1297 .ret = NOT_DONE,
1298 };
1299
1300 qemu_iovec_init_external(&qiov, &iov, 1);
1301
1302 if (qemu_in_coroutine()) {
1303 /* Fast-path if already in coroutine context */
1304 bdrv_rw_co_entry(&rwco);
1305 } else {
1306 co = qemu_coroutine_create(bdrv_rw_co_entry);
1307 qemu_coroutine_enter(co, &rwco);
1308 while (rwco.ret == NOT_DONE) {
1309 qemu_aio_wait();
1310 }
1311 }
1312 return rwco.ret;
1313 }
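/*
 * This is the standard synchronous-over-coroutine pattern: if the caller
 * is already a coroutine, the request runs inline; otherwise a coroutine
 * is created and the main loop is pumped with qemu_aio_wait() until
 * bdrv_rw_co_entry() has replaced the NOT_DONE sentinel with the real
 * return value. bdrv_read()/bdrv_write() below are thin wrappers over it,
 * and bdrv_is_allocated() further down uses the same shape.
 */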
1314
1315 /* return < 0 if error. See bdrv_write() for the return codes */
1316 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
1317 uint8_t *buf, int nb_sectors)
1318 {
1319 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
1320 }
1321
1322 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
1323 int nb_sectors, int dirty)
1324 {
1325 int64_t start, end;
1326 unsigned long val, idx, bit;
1327
1328 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
1329 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
1330
1331 for (; start <= end; start++) {
1332 idx = start / (sizeof(unsigned long) * 8);
1333 bit = start % (sizeof(unsigned long) * 8);
1334 val = bs->dirty_bitmap[idx];
1335 if (dirty) {
1336 if (!(val & (1UL << bit))) {
1337 bs->dirty_count++;
1338 val |= 1UL << bit;
1339 }
1340 } else {
1341 if (val & (1UL << bit)) {
1342 bs->dirty_count--;
1343 val &= ~(1UL << bit);
1344 }
1345 }
1346 bs->dirty_bitmap[idx] = val;
1347 }
1348 }
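/*
 * The dirty bitmap tracks one bit per chunk of BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors; idx/bit split the chunk number into an unsigned-long word index
 * and a bit offset within that word, and dirty_count counts the set bits.
 * Illustration, assuming BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 (the actual
 * value lives in block_int.h) and 64-bit longs: sector 5000 falls in chunk
 * 2, i.e. idx = 0, bit = 2.
 */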
1349
1350 /* Return < 0 if error. Important errors are:
1351 -EIO generic I/O error (may happen for all errors)
1352 -ENOMEDIUM No media inserted.
1353 -EINVAL Invalid sector number or nb_sectors
1354 -EACCES Trying to write to a read-only device
1355 */
1356 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
1357 const uint8_t *buf, int nb_sectors)
1358 {
1359 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
1360 }
1361
1362 int bdrv_pread(BlockDriverState *bs, int64_t offset,
1363 void *buf, int count1)
1364 {
1365 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1366 int len, nb_sectors, count;
1367 int64_t sector_num;
1368 int ret;
1369
1370 count = count1;
1371 /* first read to align to sector start */
1372 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1373 if (len > count)
1374 len = count;
1375 sector_num = offset >> BDRV_SECTOR_BITS;
1376 if (len > 0) {
1377 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1378 return ret;
1379 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1380 count -= len;
1381 if (count == 0)
1382 return count1;
1383 sector_num++;
1384 buf += len;
1385 }
1386
1387 /* read the sectors "in place" */
1388 nb_sectors = count >> BDRV_SECTOR_BITS;
1389 if (nb_sectors > 0) {
1390 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1391 return ret;
1392 sector_num += nb_sectors;
1393 len = nb_sectors << BDRV_SECTOR_BITS;
1394 buf += len;
1395 count -= len;
1396 }
1397
1398 /* add data from the last sector */
1399 if (count > 0) {
1400 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1401 return ret;
1402 memcpy(buf, tmp_buf, count);
1403 }
1404 return count1;
1405 }
1406
1407 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1408 const void *buf, int count1)
1409 {
1410 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1411 int len, nb_sectors, count;
1412 int64_t sector_num;
1413 int ret;
1414
1415 count = count1;
1416 /* first write to align to sector start */
1417 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1418 if (len > count)
1419 len = count;
1420 sector_num = offset >> BDRV_SECTOR_BITS;
1421 if (len > 0) {
1422 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1423 return ret;
1424 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1425 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1426 return ret;
1427 count -= len;
1428 if (count == 0)
1429 return count1;
1430 sector_num++;
1431 buf += len;
1432 }
1433
1434 /* write the sectors "in place" */
1435 nb_sectors = count >> BDRV_SECTOR_BITS;
1436 if (nb_sectors > 0) {
1437 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1438 return ret;
1439 sector_num += nb_sectors;
1440 len = nb_sectors << BDRV_SECTOR_BITS;
1441 buf += len;
1442 count -= len;
1443 }
1444
1445 /* add data from the last sector */
1446 if (count > 0) {
1447 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1448 return ret;
1449 memcpy(tmp_buf, buf, count);
1450 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1451 return ret;
1452 }
1453 return count1;
1454 }
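/*
 * Worked example of the read-modify-write split above (illustrative):
 * bdrv_pwrite(bs, 700, buf, 2000) with 512-byte sectors becomes
 *
 *   head: RMW of sector 1, bytes 188..511    (len = 324)
 *   body: direct write of sectors 2..4       (3 * 512 = 1536 bytes)
 *   tail: RMW of sector 5, bytes 0..139      (count = 140)
 *
 * 324 + 1536 + 140 == 2000. bdrv_pread() above splits reads identically,
 * minus the write-back of the bounce sectors.
 */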
1455
1456 /*
1457 * Writes to the file and ensures that no writes are reordered across this
1458 * request (acts as a barrier)
1459 *
1460 * Returns 0 on success, -errno in error cases.
1461 */
1462 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1463 const void *buf, int count)
1464 {
1465 int ret;
1466
1467 ret = bdrv_pwrite(bs, offset, buf, count);
1468 if (ret < 0) {
1469 return ret;
1470 }
1471
1472 /* No flush needed for cache modes that use O_DSYNC */
1473 if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
1474 bdrv_flush(bs);
1475 }
1476
1477 return 0;
1478 }
1479
1480 static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1481 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1482 {
1483 /* Perform I/O through a temporary buffer so that users who scribble over
1484 * their read buffer while the operation is in progress do not end up
1485 * modifying the image file. This is critical for zero-copy guest I/O
1486 * where anything might happen inside guest memory.
1487 */
1488 void *bounce_buffer;
1489
1490 struct iovec iov;
1491 QEMUIOVector bounce_qiov;
1492 int64_t cluster_sector_num;
1493 int cluster_nb_sectors;
1494 size_t skip_bytes;
1495 int ret;
1496
1497 /* Cover the entire cluster so no additional backing file I/O is required
1498 * when allocating a cluster in the image file.
1499 */
1500 round_to_clusters(bs, sector_num, nb_sectors,
1501 &cluster_sector_num, &cluster_nb_sectors);
1502
1503 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
1504 cluster_sector_num, cluster_nb_sectors);
1505
1506 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
1507 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
1508 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
1509
1510 ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
1511 &bounce_qiov);
1512 if (ret < 0) {
1513 goto err;
1514 }
1515
1516 ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
1517 &bounce_qiov);
1518 if (ret < 0) {
1519 /* It might be okay to ignore write errors for guest requests. If this
1520 * is a deliberate copy-on-read then we don't want to ignore the error.
1521 * Simply report it in all cases.
1522 */
1523 goto err;
1524 }
1525
1526 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
1527 qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
1528 nb_sectors * BDRV_SECTOR_SIZE);
1529
1530 err:
1531 qemu_vfree(bounce_buffer);
1532 return ret;
1533 }
1534
1535 /*
1536 * Handle a read request in coroutine context
1537 */
1538 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1539 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1540 {
1541 BlockDriver *drv = bs->drv;
1542 BdrvTrackedRequest req;
1543 int ret;
1544
1545 if (!drv) {
1546 return -ENOMEDIUM;
1547 }
1548 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1549 return -EIO;
1550 }
1551
1552 /* throttling disk read I/O */
1553 if (bs->io_limits_enabled) {
1554 bdrv_io_limits_intercept(bs, false, nb_sectors);
1555 }
1556
1557 if (bs->copy_on_read) {
1558 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1559 }
1560
1561 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
1562
1563 if (bs->copy_on_read) {
1564 int pnum;
1565
1566 ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
1567 if (ret < 0) {
1568 goto out;
1569 }
1570
1571 if (!ret || pnum != nb_sectors) {
1572 ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
1573 goto out;
1574 }
1575 }
1576
1577 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1578
1579 out:
1580 tracked_request_end(&req);
1581 return ret;
1582 }
1583
1584 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1585 int nb_sectors, QEMUIOVector *qiov)
1586 {
1587 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1588
1589 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
1590 }
1591
1592 /*
1593 * Handle a write request in coroutine context
1594 */
1595 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1596 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1597 {
1598 BlockDriver *drv = bs->drv;
1599 BdrvTrackedRequest req;
1600 int ret;
1601
1602 if (!bs->drv) {
1603 return -ENOMEDIUM;
1604 }
1605 if (bs->read_only) {
1606 return -EACCES;
1607 }
1608 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1609 return -EIO;
1610 }
1611
1612 /* throttling disk write I/O */
1613 if (bs->io_limits_enabled) {
1614 bdrv_io_limits_intercept(bs, true, nb_sectors);
1615 }
1616
1617 if (bs->copy_on_read) {
1618 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1619 }
1620
1621 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
1622
1623 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1624
1625 if (bs->dirty_bitmap) {
1626 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1627 }
1628
1629 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
1630 bs->wr_highest_sector = sector_num + nb_sectors - 1;
1631 }
1632
1633 tracked_request_end(&req);
1634
1635 return ret;
1636 }
1637
1638 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1639 int nb_sectors, QEMUIOVector *qiov)
1640 {
1641 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1642
1643 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov);
1644 }
1645
1646 /**
1647 * Truncate file to 'offset' bytes (needed only for file protocols)
1648 */
1649 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1650 {
1651 BlockDriver *drv = bs->drv;
1652 int ret;
1653 if (!drv)
1654 return -ENOMEDIUM;
1655 if (!drv->bdrv_truncate)
1656 return -ENOTSUP;
1657 if (bs->read_only)
1658 return -EACCES;
1659 if (bdrv_in_use(bs))
1660 return -EBUSY;
1661 ret = drv->bdrv_truncate(bs, offset);
1662 if (ret == 0) {
1663 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1664 bdrv_dev_resize_cb(bs);
1665 }
1666 return ret;
1667 }
1668
1669 /**
1670 * Length of an allocated file in bytes. Sparse files are counted by actual
1671 * allocated space. Return < 0 if error or unknown.
1672 */
1673 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
1674 {
1675 BlockDriver *drv = bs->drv;
1676 if (!drv) {
1677 return -ENOMEDIUM;
1678 }
1679 if (drv->bdrv_get_allocated_file_size) {
1680 return drv->bdrv_get_allocated_file_size(bs);
1681 }
1682 if (bs->file) {
1683 return bdrv_get_allocated_file_size(bs->file);
1684 }
1685 return -ENOTSUP;
1686 }
1687
1688 /**
1689 * Length of a file in bytes. Return < 0 if error or unknown.
1690 */
1691 int64_t bdrv_getlength(BlockDriverState *bs)
1692 {
1693 BlockDriver *drv = bs->drv;
1694 if (!drv)
1695 return -ENOMEDIUM;
1696
1697 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
1698 if (drv->bdrv_getlength) {
1699 return drv->bdrv_getlength(bs);
1700 }
1701 }
1702 return bs->total_sectors * BDRV_SECTOR_SIZE;
1703 }
1704
1705 /* return 0 as the number of sectors if no device is present or on error */
1706 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1707 {
1708 int64_t length;
1709 length = bdrv_getlength(bs);
1710 if (length < 0)
1711 length = 0;
1712 else
1713 length = length >> BDRV_SECTOR_BITS;
1714 *nb_sectors_ptr = length;
1715 }
1716
1717 struct partition {
1718 uint8_t boot_ind; /* 0x80 - active */
1719 uint8_t head; /* starting head */
1720 uint8_t sector; /* starting sector */
1721 uint8_t cyl; /* starting cylinder */
1722 uint8_t sys_ind; /* What partition type */
1723 uint8_t end_head; /* end head */
1724 uint8_t end_sector; /* end sector */
1725 uint8_t end_cyl; /* end cylinder */
1726 uint32_t start_sect; /* starting sector counting from 0 */
1727 uint32_t nr_sects; /* nr of sectors in partition */
1728 } QEMU_PACKED;
1729
1730 /* Try to guess the disk logical geometry from the MS-DOS partition table. Return 0 if OK, -1 if it could not be guessed. */
1731 static int guess_disk_lchs(BlockDriverState *bs,
1732 int *pcylinders, int *pheads, int *psectors)
1733 {
1734 uint8_t buf[BDRV_SECTOR_SIZE];
1735 int ret, i, heads, sectors, cylinders;
1736 struct partition *p;
1737 uint32_t nr_sects;
1738 uint64_t nb_sectors;
1739
1740 bdrv_get_geometry(bs, &nb_sectors);
1741
1742 ret = bdrv_read(bs, 0, buf, 1);
1743 if (ret < 0)
1744 return -1;
1745 /* test msdos magic */
1746 if (buf[510] != 0x55 || buf[511] != 0xaa)
1747 return -1;
1748 for(i = 0; i < 4; i++) {
1749 p = ((struct partition *)(buf + 0x1be)) + i;
1750 nr_sects = le32_to_cpu(p->nr_sects);
1751 if (nr_sects && p->end_head) {
1752 /* We make the assumption that the partition terminates on
1753 a cylinder boundary */
1754 heads = p->end_head + 1;
1755 sectors = p->end_sector & 63;
1756 if (sectors == 0)
1757 continue;
1758 cylinders = nb_sectors / (heads * sectors);
1759 if (cylinders < 1 || cylinders > 16383)
1760 continue;
1761 *pheads = heads;
1762 *psectors = sectors;
1763 *pcylinders = cylinders;
1764 #if 0
1765 printf("guessed geometry: LCHS=%d %d %d\n",
1766 cylinders, heads, sectors);
1767 #endif
1768 return 0;
1769 }
1770 }
1771 return -1;
1772 }
1773
1774 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1775 {
1776 int translation, lba_detected = 0;
1777 int cylinders, heads, secs;
1778 uint64_t nb_sectors;
1779
1780 /* if a geometry hint is available, use it */
1781 bdrv_get_geometry(bs, &nb_sectors);
1782 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1783 translation = bdrv_get_translation_hint(bs);
1784 if (cylinders != 0) {
1785 *pcyls = cylinders;
1786 *pheads = heads;
1787 *psecs = secs;
1788 } else {
1789 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1790 if (heads > 16) {
1791 /* if heads > 16, it means that a BIOS LBA
1792 translation was active, so the default
1793 hardware geometry is OK */
1794 lba_detected = 1;
1795 goto default_geometry;
1796 } else {
1797 *pcyls = cylinders;
1798 *pheads = heads;
1799 *psecs = secs;
1800 /* disable any translation to be in sync with
1801 the logical geometry */
1802 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1803 bdrv_set_translation_hint(bs,
1804 BIOS_ATA_TRANSLATION_NONE);
1805 }
1806 }
1807 } else {
1808 default_geometry:
1809 /* if no geometry, use a standard physical disk geometry */
1810 cylinders = nb_sectors / (16 * 63);
1811
1812 if (cylinders > 16383)
1813 cylinders = 16383;
1814 else if (cylinders < 2)
1815 cylinders = 2;
1816 *pcyls = cylinders;
1817 *pheads = 16;
1818 *psecs = 63;
1819 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1820 if ((*pcyls * *pheads) <= 131072) {
1821 bdrv_set_translation_hint(bs,
1822 BIOS_ATA_TRANSLATION_LARGE);
1823 } else {
1824 bdrv_set_translation_hint(bs,
1825 BIOS_ATA_TRANSLATION_LBA);
1826 }
1827 }
1828 }
1829 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1830 }
1831 }
1832
1833 void bdrv_set_geometry_hint(BlockDriverState *bs,
1834 int cyls, int heads, int secs)
1835 {
1836 bs->cyls = cyls;
1837 bs->heads = heads;
1838 bs->secs = secs;
1839 }
1840
1841 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1842 {
1843 bs->translation = translation;
1844 }
1845
1846 void bdrv_get_geometry_hint(BlockDriverState *bs,
1847 int *pcyls, int *pheads, int *psecs)
1848 {
1849 *pcyls = bs->cyls;
1850 *pheads = bs->heads;
1851 *psecs = bs->secs;
1852 }
1853
1854 /* throttling disk io limits */
1855 void bdrv_set_io_limits(BlockDriverState *bs,
1856 BlockIOLimit *io_limits)
1857 {
1858 bs->io_limits = *io_limits;
1859 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
1860 }
1861
1862 /* Recognize floppy formats */
1863 typedef struct FDFormat {
1864 FDriveType drive;
1865 uint8_t last_sect;
1866 uint8_t max_track;
1867 uint8_t max_head;
1868 } FDFormat;
1869
1870 static const FDFormat fd_formats[] = {
1871 /* First entry is default format */
1872 /* 1.44 MB 3"1/2 floppy disks */
1873 { FDRIVE_DRV_144, 18, 80, 1, },
1874 { FDRIVE_DRV_144, 20, 80, 1, },
1875 { FDRIVE_DRV_144, 21, 80, 1, },
1876 { FDRIVE_DRV_144, 21, 82, 1, },
1877 { FDRIVE_DRV_144, 21, 83, 1, },
1878 { FDRIVE_DRV_144, 22, 80, 1, },
1879 { FDRIVE_DRV_144, 23, 80, 1, },
1880 { FDRIVE_DRV_144, 24, 80, 1, },
1881 /* 2.88 MB 3"1/2 floppy disks */
1882 { FDRIVE_DRV_288, 36, 80, 1, },
1883 { FDRIVE_DRV_288, 39, 80, 1, },
1884 { FDRIVE_DRV_288, 40, 80, 1, },
1885 { FDRIVE_DRV_288, 44, 80, 1, },
1886 { FDRIVE_DRV_288, 48, 80, 1, },
1887 /* 720 kB 3"1/2 floppy disks */
1888 { FDRIVE_DRV_144, 9, 80, 1, },
1889 { FDRIVE_DRV_144, 10, 80, 1, },
1890 { FDRIVE_DRV_144, 10, 82, 1, },
1891 { FDRIVE_DRV_144, 10, 83, 1, },
1892 { FDRIVE_DRV_144, 13, 80, 1, },
1893 { FDRIVE_DRV_144, 14, 80, 1, },
1894 /* 1.2 MB 5"1/4 floppy disks */
1895 { FDRIVE_DRV_120, 15, 80, 1, },
1896 { FDRIVE_DRV_120, 18, 80, 1, },
1897 { FDRIVE_DRV_120, 18, 82, 1, },
1898 { FDRIVE_DRV_120, 18, 83, 1, },
1899 { FDRIVE_DRV_120, 20, 80, 1, },
1900 /* 720 kB 5"1/4 floppy disks */
1901 { FDRIVE_DRV_120, 9, 80, 1, },
1902 { FDRIVE_DRV_120, 11, 80, 1, },
1903 /* 360 kB 5"1/4 floppy disks */
1904 { FDRIVE_DRV_120, 9, 40, 1, },
1905 { FDRIVE_DRV_120, 9, 40, 0, },
1906 { FDRIVE_DRV_120, 10, 41, 1, },
1907 { FDRIVE_DRV_120, 10, 42, 1, },
1908 /* 320 kB 5"1/4 floppy disks */
1909 { FDRIVE_DRV_120, 8, 40, 1, },
1910 { FDRIVE_DRV_120, 8, 40, 0, },
1911 /* 360 kB must match 5"1/4 better than 3"1/2... */
1912 { FDRIVE_DRV_144, 9, 80, 0, },
1913 /* end */
1914 { FDRIVE_DRV_NONE, -1, -1, 0, },
1915 };
1916
1917 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
1918 int *max_track, int *last_sect,
1919 FDriveType drive_in, FDriveType *drive)
1920 {
1921 const FDFormat *parse;
1922 uint64_t nb_sectors, size;
1923 int i, first_match, match;
1924
1925 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
1926 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
1927 /* User defined disk */
1928 } else {
1929 bdrv_get_geometry(bs, &nb_sectors);
1930 match = -1;
1931 first_match = -1;
1932 for (i = 0; ; i++) {
1933 parse = &fd_formats[i];
1934 if (parse->drive == FDRIVE_DRV_NONE) {
1935 break;
1936 }
1937 if (drive_in == parse->drive ||
1938 drive_in == FDRIVE_DRV_NONE) {
1939 size = (parse->max_head + 1) * parse->max_track *
1940 parse->last_sect;
1941 if (nb_sectors == size) {
1942 match = i;
1943 break;
1944 }
1945 if (first_match == -1) {
1946 first_match = i;
1947 }
1948 }
1949 }
1950 if (match == -1) {
1951 if (first_match == -1) {
1952 match = 1;
1953 } else {
1954 match = first_match;
1955 }
1956 parse = &fd_formats[match];
1957 }
1958 *nb_heads = parse->max_head + 1;
1959 *max_track = parse->max_track;
1960 *last_sect = parse->last_sect;
1961 *drive = parse->drive;
1962 }
1963 }
1964
1965 int bdrv_get_translation_hint(BlockDriverState *bs)
1966 {
1967 return bs->translation;
1968 }
1969
1970 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
1971 BlockErrorAction on_write_error)
1972 {
1973 bs->on_read_error = on_read_error;
1974 bs->on_write_error = on_write_error;
1975 }
1976
1977 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
1978 {
1979 return is_read ? bs->on_read_error : bs->on_write_error;
1980 }
1981
1982 int bdrv_is_read_only(BlockDriverState *bs)
1983 {
1984 return bs->read_only;
1985 }
1986
1987 int bdrv_is_sg(BlockDriverState *bs)
1988 {
1989 return bs->sg;
1990 }
1991
1992 int bdrv_enable_write_cache(BlockDriverState *bs)
1993 {
1994 return bs->enable_write_cache;
1995 }
1996
1997 int bdrv_is_encrypted(BlockDriverState *bs)
1998 {
1999 if (bs->backing_hd && bs->backing_hd->encrypted)
2000 return 1;
2001 return bs->encrypted;
2002 }
2003
2004 int bdrv_key_required(BlockDriverState *bs)
2005 {
2006 BlockDriverState *backing_hd = bs->backing_hd;
2007
2008 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2009 return 1;
2010 return (bs->encrypted && !bs->valid_key);
2011 }
2012
2013 int bdrv_set_key(BlockDriverState *bs, const char *key)
2014 {
2015 int ret;
2016 if (bs->backing_hd && bs->backing_hd->encrypted) {
2017 ret = bdrv_set_key(bs->backing_hd, key);
2018 if (ret < 0)
2019 return ret;
2020 if (!bs->encrypted)
2021 return 0;
2022 }
2023 if (!bs->encrypted) {
2024 return -EINVAL;
2025 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2026 return -ENOMEDIUM;
2027 }
2028 ret = bs->drv->bdrv_set_key(bs, key);
2029 if (ret < 0) {
2030 bs->valid_key = 0;
2031 } else if (!bs->valid_key) {
2032 bs->valid_key = 1;
2033 /* call the change callback now, we skipped it on open */
2034 bdrv_dev_change_media_cb(bs, true);
2035 }
2036 return ret;
2037 }
2038
2039 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
2040 {
2041 if (!bs->drv) {
2042 buf[0] = '\0';
2043 } else {
2044 pstrcpy(buf, buf_size, bs->drv->format_name);
2045 }
2046 }
2047
2048 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
2049 void *opaque)
2050 {
2051 BlockDriver *drv;
2052
2053 QLIST_FOREACH(drv, &bdrv_drivers, list) {
2054 it(opaque, drv->format_name);
2055 }
2056 }
2057
2058 BlockDriverState *bdrv_find(const char *name)
2059 {
2060 BlockDriverState *bs;
2061
2062 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2063 if (!strcmp(name, bs->device_name)) {
2064 return bs;
2065 }
2066 }
2067 return NULL;
2068 }
2069
2070 BlockDriverState *bdrv_next(BlockDriverState *bs)
2071 {
2072 if (!bs) {
2073 return QTAILQ_FIRST(&bdrv_states);
2074 }
2075 return QTAILQ_NEXT(bs, list);
2076 }
2077
2078 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
2079 {
2080 BlockDriverState *bs;
2081
2082 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2083 it(opaque, bs);
2084 }
2085 }
2086
2087 const char *bdrv_get_device_name(BlockDriverState *bs)
2088 {
2089 return bs->device_name;
2090 }
2091
2092 void bdrv_flush_all(void)
2093 {
2094 BlockDriverState *bs;
2095
2096 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2097 if (!bdrv_is_read_only(bs) && bdrv_is_inserted(bs)) {
2098 bdrv_flush(bs);
2099 }
2100 }
2101 }
2102
2103 int bdrv_has_zero_init(BlockDriverState *bs)
2104 {
2105 assert(bs->drv);
2106
2107 if (bs->drv->bdrv_has_zero_init) {
2108 return bs->drv->bdrv_has_zero_init(bs);
2109 }
2110
2111 return 1;
2112 }
2113
2114 typedef struct BdrvCoIsAllocatedData {
2115 BlockDriverState *bs;
2116 int64_t sector_num;
2117 int nb_sectors;
2118 int *pnum;
2119 int ret;
2120 bool done;
2121 } BdrvCoIsAllocatedData;
2122
2123 /*
2124 * Returns true iff the specified sector is present in the disk image. Drivers
2125 * not implementing the functionality are assumed to not support backing files,
2126 * hence all their sectors are reported as allocated.
2127 *
2128 * If 'sector_num' is beyond the end of the disk image the return value is 0
2129 * and 'pnum' is set to 0.
2130 *
2131 * 'pnum' is set to the number of sectors (including and immediately following
2132 * the specified sector) that are known to be in the same
2133 * allocated/unallocated state.
2134 *
2135 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2136 * beyond the end of the disk image it will be clamped.
2137 */
2138 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2139 int nb_sectors, int *pnum)
2140 {
2141 int64_t n;
2142
2143 if (sector_num >= bs->total_sectors) {
2144 *pnum = 0;
2145 return 0;
2146 }
2147
2148 n = bs->total_sectors - sector_num;
2149 if (n < nb_sectors) {
2150 nb_sectors = n;
2151 }
2152
2153 if (!bs->drv->bdrv_co_is_allocated) {
2154 *pnum = nb_sectors;
2155 return 1;
2156 }
2157
2158 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2159 }
2160
2161 /* Coroutine wrapper for bdrv_is_allocated() */
2162 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2163 {
2164 BdrvCoIsAllocatedData *data = opaque;
2165 BlockDriverState *bs = data->bs;
2166
2167 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2168 data->pnum);
2169 data->done = true;
2170 }
2171
2172 /*
2173 * Synchronous wrapper around bdrv_co_is_allocated().
2174 *
2175 * See bdrv_co_is_allocated() for details.
2176 */
2177 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2178 int *pnum)
2179 {
2180 Coroutine *co;
2181 BdrvCoIsAllocatedData data = {
2182 .bs = bs,
2183 .sector_num = sector_num,
2184 .nb_sectors = nb_sectors,
2185 .pnum = pnum,
2186 .done = false,
2187 };
2188
2189 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2190 qemu_coroutine_enter(co, &data);
2191 while (!data.done) {
2192 qemu_aio_wait();
2193 }
2194 return data.ret;
2195 }
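
/*
 * Illustrative sketch (editor's addition): walking a whole image with the
 * synchronous wrapper above.  Because *pnum reports the length of the
 * current extent, the loop advances one extent per call instead of probing
 * sector by sector.
 */
#if 0
static void example_dump_allocation_map(BlockDriverState *bs)
{
    int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector_num = 0;

    while (sector_num < total) {
        int n = 65536;          /* probe at most 32 MiB worth of sectors */
        int pnum, allocated;

        if (total - sector_num < n) {
            n = total - sector_num;
        }
        allocated = bdrv_is_allocated(bs, sector_num, n, &pnum);
        printf("sectors %" PRId64 "+%d: %s\n", sector_num, pnum,
               allocated ? "allocated" : "unallocated");
        sector_num += pnum;
    }
}
#endif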
2196
2197 void bdrv_mon_event(const BlockDriverState *bdrv,
2198 BlockMonEventAction action, int is_read)
2199 {
2200 QObject *data;
2201 const char *action_str;
2202
2203 switch (action) {
2204 case BDRV_ACTION_REPORT:
2205 action_str = "report";
2206 break;
2207 case BDRV_ACTION_IGNORE:
2208 action_str = "ignore";
2209 break;
2210 case BDRV_ACTION_STOP:
2211 action_str = "stop";
2212 break;
2213 default:
2214 abort();
2215 }
2216
2217 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2218 bdrv->device_name,
2219 action_str,
2220 is_read ? "read" : "write");
2221 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
2222
2223 qobject_decref(data);
2224 }
2225
2226 BlockInfoList *qmp_query_block(Error **errp)
2227 {
2228 BlockInfoList *head = NULL, *cur_item = NULL;
2229 BlockDriverState *bs;
2230
2231 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2232 BlockInfoList *info = g_malloc0(sizeof(*info));
2233
2234 info->value = g_malloc0(sizeof(*info->value));
2235 info->value->device = g_strdup(bs->device_name);
2236 info->value->type = g_strdup("unknown");
2237 info->value->locked = bdrv_dev_is_medium_locked(bs);
2238 info->value->removable = bdrv_dev_has_removable_media(bs);
2239
2240 if (bdrv_dev_has_removable_media(bs)) {
2241 info->value->has_tray_open = true;
2242 info->value->tray_open = bdrv_dev_is_tray_open(bs);
2243 }
2244
2245 if (bdrv_iostatus_is_enabled(bs)) {
2246 info->value->has_io_status = true;
2247 info->value->io_status = bs->iostatus;
2248 }
2249
2250 if (bs->drv) {
2251 info->value->has_inserted = true;
2252 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2253 info->value->inserted->file = g_strdup(bs->filename);
2254 info->value->inserted->ro = bs->read_only;
2255 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2256 info->value->inserted->encrypted = bs->encrypted;
2257 if (bs->backing_file[0]) {
2258 info->value->inserted->has_backing_file = true;
2259 info->value->inserted->backing_file = g_strdup(bs->backing_file);
2260 }
2261
2262 if (bs->io_limits_enabled) {
2263 info->value->inserted->bps =
2264 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2265 info->value->inserted->bps_rd =
2266 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2267 info->value->inserted->bps_wr =
2268 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2269 info->value->inserted->iops =
2270 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2271 info->value->inserted->iops_rd =
2272 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2273 info->value->inserted->iops_wr =
2274 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2275 }
2276 }
2277
2278 /* XXX: waiting for the qapi to support GSList */
2279 if (!cur_item) {
2280 head = cur_item = info;
2281 } else {
2282 cur_item->next = info;
2283 cur_item = info;
2284 }
2285 }
2286
2287 return head;
2288 }
2289
2290 /* Consider exposing this as a full fledged QMP command */
2291 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
2292 {
2293 BlockStats *s;
2294
2295 s = g_malloc0(sizeof(*s));
2296
2297 if (bs->device_name[0]) {
2298 s->has_device = true;
2299 s->device = g_strdup(bs->device_name);
2300 }
2301
2302 s->stats = g_malloc0(sizeof(*s->stats));
2303 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2304 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2305 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2306 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2307 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2308 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2309 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2310 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2311 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2312
2313 if (bs->file) {
2314 s->has_parent = true;
2315 s->parent = qmp_query_blockstat(bs->file, NULL);
2316 }
2317
2318 return s;
2319 }
2320
2321 BlockStatsList *qmp_query_blockstats(Error **errp)
2322 {
2323 BlockStatsList *head = NULL, *cur_item = NULL;
2324 BlockDriverState *bs;
2325
2326 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2327 BlockStatsList *info = g_malloc0(sizeof(*info));
2328 info->value = qmp_query_blockstat(bs, NULL);
2329
2330 /* XXX: waiting for the qapi to support GSList */
2331 if (!cur_item) {
2332 head = cur_item = info;
2333 } else {
2334 cur_item->next = info;
2335 cur_item = info;
2336 }
2337 }
2338
2339 return head;
2340 }
2341
2342 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2343 {
2344 if (bs->backing_hd && bs->backing_hd->encrypted)
2345 return bs->backing_file;
2346 else if (bs->encrypted)
2347 return bs->filename;
2348 else
2349 return NULL;
2350 }
2351
2352 void bdrv_get_backing_filename(BlockDriverState *bs,
2353 char *filename, int filename_size)
2354 {
2355 pstrcpy(filename, filename_size, bs->backing_file);
2356 }
2357
2358 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
2359 const uint8_t *buf, int nb_sectors)
2360 {
2361 BlockDriver *drv = bs->drv;
2362 if (!drv)
2363 return -ENOMEDIUM;
2364 if (!drv->bdrv_write_compressed)
2365 return -ENOTSUP;
2366 if (bdrv_check_request(bs, sector_num, nb_sectors))
2367 return -EIO;
2368
2369 if (bs->dirty_bitmap) {
2370 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2371 }
2372
2373 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2374 }
2375
2376 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2377 {
2378 BlockDriver *drv = bs->drv;
2379 if (!drv)
2380 return -ENOMEDIUM;
2381 if (!drv->bdrv_get_info)
2382 return -ENOTSUP;
2383 memset(bdi, 0, sizeof(*bdi));
2384 return drv->bdrv_get_info(bs, bdi);
2385 }
2386
2387 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2388 int64_t pos, int size)
2389 {
2390 BlockDriver *drv = bs->drv;
2391 if (!drv)
2392 return -ENOMEDIUM;
2393 if (drv->bdrv_save_vmstate)
2394 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2395 if (bs->file)
2396 return bdrv_save_vmstate(bs->file, buf, pos, size);
2397 return -ENOTSUP;
2398 }
2399
2400 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2401 int64_t pos, int size)
2402 {
2403 BlockDriver *drv = bs->drv;
2404 if (!drv)
2405 return -ENOMEDIUM;
2406 if (drv->bdrv_load_vmstate)
2407 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2408 if (bs->file)
2409 return bdrv_load_vmstate(bs->file, buf, pos, size);
2410 return -ENOTSUP;
2411 }
2412
2413 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2414 {
2415 BlockDriver *drv = bs->drv;
2416
2417 if (!drv || !drv->bdrv_debug_event) {
2418 return;
2419 }
2420
2421 drv->bdrv_debug_event(bs, event);
2422
2423 }
2424
2425 /**************************************************************/
2426 /* handling of snapshots */
2427
2428 int bdrv_can_snapshot(BlockDriverState *bs)
2429 {
2430 BlockDriver *drv = bs->drv;
2431 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
2432 return 0;
2433 }
2434
2435 if (!drv->bdrv_snapshot_create) {
2436 if (bs->file != NULL) {
2437 return bdrv_can_snapshot(bs->file);
2438 }
2439 return 0;
2440 }
2441
2442 return 1;
2443 }
2444
2445 int bdrv_is_snapshot(BlockDriverState *bs)
2446 {
2447 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2448 }
2449
2450 BlockDriverState *bdrv_snapshots(void)
2451 {
2452 BlockDriverState *bs;
2453
2454 if (bs_snapshots) {
2455 return bs_snapshots;
2456 }
2457
2458 bs = NULL;
2459 while ((bs = bdrv_next(bs))) {
2460 if (bdrv_can_snapshot(bs)) {
2461 bs_snapshots = bs;
2462 return bs;
2463 }
2464 }
2465 return NULL;
2466 }
2467
2468 int bdrv_snapshot_create(BlockDriverState *bs,
2469 QEMUSnapshotInfo *sn_info)
2470 {
2471 BlockDriver *drv = bs->drv;
2472 if (!drv)
2473 return -ENOMEDIUM;
2474 if (drv->bdrv_snapshot_create)
2475 return drv->bdrv_snapshot_create(bs, sn_info);
2476 if (bs->file)
2477 return bdrv_snapshot_create(bs->file, sn_info);
2478 return -ENOTSUP;
2479 }
2480
2481 int bdrv_snapshot_goto(BlockDriverState *bs,
2482 const char *snapshot_id)
2483 {
2484 BlockDriver *drv = bs->drv;
2485 int ret, open_ret;
2486
2487 if (!drv)
2488 return -ENOMEDIUM;
2489 if (drv->bdrv_snapshot_goto)
2490 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2491
2492 if (bs->file) {
2493 drv->bdrv_close(bs);
2494 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2495 open_ret = drv->bdrv_open(bs, bs->open_flags);
2496 if (open_ret < 0) {
2497 bdrv_delete(bs->file);
2498 bs->drv = NULL;
2499 return open_ret;
2500 }
2501 return ret;
2502 }
2503
2504 return -ENOTSUP;
2505 }
2506
2507 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2508 {
2509 BlockDriver *drv = bs->drv;
2510 if (!drv)
2511 return -ENOMEDIUM;
2512 if (drv->bdrv_snapshot_delete)
2513 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2514 if (bs->file)
2515 return bdrv_snapshot_delete(bs->file, snapshot_id);
2516 return -ENOTSUP;
2517 }
2518
2519 int bdrv_snapshot_list(BlockDriverState *bs,
2520 QEMUSnapshotInfo **psn_info)
2521 {
2522 BlockDriver *drv = bs->drv;
2523 if (!drv)
2524 return -ENOMEDIUM;
2525 if (drv->bdrv_snapshot_list)
2526 return drv->bdrv_snapshot_list(bs, psn_info);
2527 if (bs->file)
2528 return bdrv_snapshot_list(bs->file, psn_info);
2529 return -ENOTSUP;
2530 }
2531
2532 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2533 const char *snapshot_name)
2534 {
2535 BlockDriver *drv = bs->drv;
2536 if (!drv) {
2537 return -ENOMEDIUM;
2538 }
2539 if (!bs->read_only) {
2540 return -EINVAL;
2541 }
2542 if (drv->bdrv_snapshot_load_tmp) {
2543 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2544 }
2545 return -ENOTSUP;
2546 }
2547
2548 #define NB_SUFFIXES 4
2549
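/* Worked examples for the conversion below (illustrative):
 *       999 -> "999"   (printed as-is up to three digits)
 *      1536 -> "1.5K"  (one decimal place below 10 * base)
 *    500000 -> "488K"  (rounded to an integer below 1000 * base)
 *   1048576 -> "1.0M"
 */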
2550 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2551 {
2552 static const char suffixes[NB_SUFFIXES] = "KMGT";
2553 int64_t base;
2554 int i;
2555
2556 if (size <= 999) {
2557 snprintf(buf, buf_size, "%" PRId64, size);
2558 } else {
2559 base = 1024;
2560 for(i = 0; i < NB_SUFFIXES; i++) {
2561 if (size < (10 * base)) {
2562 snprintf(buf, buf_size, "%0.1f%c",
2563 (double)size / base,
2564 suffixes[i]);
2565 break;
2566 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
2567 snprintf(buf, buf_size, "%" PRId64 "%c",
2568 ((size + (base >> 1)) / base),
2569 suffixes[i]);
2570 break;
2571 }
2572 base = base * 1024;
2573 }
2574 }
2575 return buf;
2576 }
2577
2578 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2579 {
2580 char buf1[128], date_buf[128], clock_buf[128];
2581 #ifdef _WIN32
2582 struct tm *ptm;
2583 #else
2584 struct tm tm;
2585 #endif
2586 time_t ti;
2587 int64_t secs;
2588
2589 if (!sn) {
2590 snprintf(buf, buf_size,
2591 "%-10s%-20s%7s%20s%15s",
2592 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2593 } else {
2594 ti = sn->date_sec;
2595 #ifdef _WIN32
2596 ptm = localtime(&ti);
2597 strftime(date_buf, sizeof(date_buf),
2598 "%Y-%m-%d %H:%M:%S", ptm);
2599 #else
2600 localtime_r(&ti, &tm);
2601 strftime(date_buf, sizeof(date_buf),
2602 "%Y-%m-%d %H:%M:%S", &tm);
2603 #endif
2604 secs = sn->vm_clock_nsec / 1000000000;
2605 snprintf(clock_buf, sizeof(clock_buf),
2606 "%02d:%02d:%02d.%03d",
2607 (int)(secs / 3600),
2608 (int)((secs / 60) % 60),
2609 (int)(secs % 60),
2610 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2611 snprintf(buf, buf_size,
2612 "%-10s%-20s%7s%20s%15s",
2613 sn->id_str, sn->name,
2614 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2615 date_buf,
2616 clock_buf);
2617 }
2618 return buf;
2619 }
2620
2621 /**************************************************************/
2622 /* async I/Os */
2623
2624 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2625 QEMUIOVector *qiov, int nb_sectors,
2626 BlockDriverCompletionFunc *cb, void *opaque)
2627 {
2628 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2629
2630 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2631 cb, opaque, false);
2632 }
2633
2634 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2635 QEMUIOVector *qiov, int nb_sectors,
2636 BlockDriverCompletionFunc *cb, void *opaque)
2637 {
2638 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2639
2640 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2641 cb, opaque, true);
2642 }
2643
2644
2645 typedef struct MultiwriteCB {
2646 int error;
2647 int num_requests;
2648 int num_callbacks;
2649 struct {
2650 BlockDriverCompletionFunc *cb;
2651 void *opaque;
2652 QEMUIOVector *free_qiov;
2653 void *free_buf;
2654 } callbacks[];
2655 } MultiwriteCB;
2656
2657 static void multiwrite_user_cb(MultiwriteCB *mcb)
2658 {
2659 int i;
2660
2661 for (i = 0; i < mcb->num_callbacks; i++) {
2662 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2663 if (mcb->callbacks[i].free_qiov) {
2664 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2665 }
2666 g_free(mcb->callbacks[i].free_qiov);
2667 qemu_vfree(mcb->callbacks[i].free_buf);
2668 }
2669 }
2670
2671 static void multiwrite_cb(void *opaque, int ret)
2672 {
2673 MultiwriteCB *mcb = opaque;
2674
2675 trace_multiwrite_cb(mcb, ret);
2676
2677 if (ret < 0 && !mcb->error) {
2678 mcb->error = ret;
2679 }
2680
2681 mcb->num_requests--;
2682 if (mcb->num_requests == 0) {
2683 multiwrite_user_cb(mcb);
2684 g_free(mcb);
2685 }
2686 }
2687
2688 static int multiwrite_req_compare(const void *a, const void *b)
2689 {
2690 const BlockRequest *req1 = a, *req2 = b;
2691
2692 /*
2693 * Note that we can't simply subtract req2->sector from req1->sector
2694 * here as that could overflow the return value.
2695 */
2696 if (req1->sector > req2->sector) {
2697 return 1;
2698 } else if (req1->sector < req2->sector) {
2699 return -1;
2700 } else {
2701 return 0;
2702 }
2703 }
2704
2705 /*
2706 * Takes a bunch of requests and tries to merge them. Returns the number of
2707 * requests that remain after merging.
2708 */
2709 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2710 int num_reqs, MultiwriteCB *mcb)
2711 {
2712 int i, outidx;
2713
2714 // Sort requests by start sector
2715 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2716
2717 // Check if adjacent requests touch the same clusters. If so, combine them,
2718 // filling up gaps with zero sectors.
2719 outidx = 0;
2720 for (i = 1; i < num_reqs; i++) {
2721 int merge = 0;
2722 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2723
2724 // This handles the cases that are valid for all block drivers, namely
2725 // exactly sequential writes and overlapping writes.
2726 if (reqs[i].sector <= oldreq_last) {
2727 merge = 1;
2728 }
2729
2730 // The block driver may decide that it makes sense to combine requests
2731 // even if there is a gap of some sectors between them. In this case,
2732 // the gap is filled with zeros (therefore this is only applicable to
2733 // as-yet-unused space in formats like qcow2).
2734 if (!merge && bs->drv->bdrv_merge_requests) {
2735 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2736 }
2737
2738 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2739 merge = 0;
2740 }
2741
2742 if (merge) {
2743 size_t size;
2744 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
2745 qemu_iovec_init(qiov,
2746 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2747
2748 // Add the first request to the merged one. If the requests are
2749 // overlapping, drop the last sectors of the first request.
2750 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2751 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2752
2753 // We might need to add some zeros between the two requests
2754 if (reqs[i].sector > oldreq_last) {
2755 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2756 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2757 memset(buf, 0, zero_bytes);
2758 qemu_iovec_add(qiov, buf, zero_bytes);
2759 mcb->callbacks[i].free_buf = buf;
2760 }
2761
2762 // Add the second request
2763 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2764
2765 reqs[outidx].nb_sectors = qiov->size >> 9;
2766 reqs[outidx].qiov = qiov;
2767
2768 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2769 } else {
2770 outidx++;
2771 reqs[outidx].sector = reqs[i].sector;
2772 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2773 reqs[outidx].qiov = reqs[i].qiov;
2774 }
2775 }
2776
2777 return outidx + 1;
2778 }
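
/*
 * Worked example for the merge above (editor's addition): writes covering
 * sectors [0,8) and [8,16) are exactly sequential and collapse into a
 * single request for [0,16).  With a driver that implements
 * bdrv_merge_requests, [0,8) and [10,18) may merge as well, with sectors
 * 8 and 9 supplied from a zeroed bounce buffer.
 */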
2779
2780 /*
2781 * Submit multiple AIO write requests at once.
2782 *
2783 * On success, the function returns 0 and all requests in the reqs array have
2784 * been submitted. In the error case this function returns -1, and any of
2785 * the requests may or may not have been submitted yet; the callback will be
2786 * called for some of the requests and not for others. The caller must check
2787 * the error field of each BlockRequest to know which callbacks to wait for
2788 * (if error != 0, no callback will be called for that request).
2789 *
2790 * The implementation may modify the contents of the reqs array, e.g. to merge
2791 * requests. However, the fields opaque and error are left unmodified as they
2792 * are used to signal failure for a single request to the caller.
2793 */
2794 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2795 {
2796 BlockDriverAIOCB *acb;
2797 MultiwriteCB *mcb;
2798 int i;
2799
2800 /* don't submit writes if we don't have a medium */
2801 if (bs->drv == NULL) {
2802 for (i = 0; i < num_reqs; i++) {
2803 reqs[i].error = -ENOMEDIUM;
2804 }
2805 return -1;
2806 }
2807
2808 if (num_reqs == 0) {
2809 return 0;
2810 }
2811
2812 // Create MultiwriteCB structure
2813 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2814 mcb->num_requests = 0;
2815 mcb->num_callbacks = num_reqs;
2816
2817 for (i = 0; i < num_reqs; i++) {
2818 mcb->callbacks[i].cb = reqs[i].cb;
2819 mcb->callbacks[i].opaque = reqs[i].opaque;
2820 }
2821
2822 // Check for mergeable requests
2823 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2824
2825 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2826
2827 /*
2828 * Run the aio requests. As soon as one request can't be submitted
2829 * successfully, fail all requests that are not yet submitted (we must
2830 * return failure for all requests anyway)
2831 *
2832 * num_requests cannot be set to the right value immediately: If
2833 * bdrv_aio_writev fails for some request, num_requests would be too high
2834 * and therefore multiwrite_cb() would never recognize the multiwrite
2835 * request as completed. We also cannot use the loop variable i to set it
2836 * when the first request fails because the callback may already have been
2837 * called for previously submitted requests. Thus, num_requests must be
2838 * incremented for each request that is submitted.
2839 *
2840 * The problem that callbacks may be called early also means that we need
2841 * to take care that num_requests doesn't become 0 before all requests are
2842 * submitted - multiwrite_cb() would consider the multiwrite request
2843 * completed. A dummy request that is "completed" by a manual call to
2844 * multiwrite_cb() takes care of this.
2845 */
2846 mcb->num_requests = 1;
2847
2848 // Run the aio requests
2849 for (i = 0; i < num_reqs; i++) {
2850 mcb->num_requests++;
2851 acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2852 reqs[i].nb_sectors, multiwrite_cb, mcb);
2853
2854 if (acb == NULL) {
2855 // We can only fail the whole thing if no request has been
2856 // submitted yet. Otherwise we'll wait for the submitted AIOs to
2857 // complete and report the error in the callback.
2858 if (i == 0) {
2859 trace_bdrv_aio_multiwrite_earlyfail(mcb);
2860 goto fail;
2861 } else {
2862 trace_bdrv_aio_multiwrite_latefail(mcb, i);
2863 multiwrite_cb(mcb, -EIO);
2864 break;
2865 }
2866 }
2867 }
2868
2869 /* Complete the dummy request */
2870 multiwrite_cb(mcb, 0);
2871
2872 return 0;
2873
2874 fail:
2875 for (i = 0; i < mcb->num_callbacks; i++) {
2876 reqs[i].error = -EIO;
2877 }
2878 g_free(mcb);
2879 return -1;
2880 }
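
/*
 * Illustrative sketch (editor's addition): batching two writes through
 * bdrv_aio_multiwrite().  'my_write_cb' and 'my_state' stand in for a
 * device model's completion callback and per-request state.  On failure,
 * only requests whose error field is still 0 will get their callback.
 */
#if 0
static int example_submit_batch(BlockDriverState *bs,
                                QEMUIOVector *qiov0, QEMUIOVector *qiov1,
                                BlockDriverCompletionFunc *my_write_cb,
                                void *my_state)
{
    BlockRequest reqs[2] = {
        { .sector = 0,  .nb_sectors = qiov0->size >> 9, .qiov = qiov0,
          .cb = my_write_cb, .opaque = my_state },
        { .sector = 64, .nb_sectors = qiov1->size >> 9, .qiov = qiov1,
          .cb = my_write_cb, .opaque = my_state },
    };

    return bdrv_aio_multiwrite(bs, reqs, 2);
}
#endif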
2881
2882 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2883 {
2884 acb->pool->cancel(acb);
2885 }
2886
2887 /* block I/O throttling */
2888 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
2889 bool is_write, double elapsed_time, uint64_t *wait)
2890 {
2891 uint64_t bps_limit = 0;
2892 double bytes_limit, bytes_base, bytes_res;
2893 double slice_time, wait_time;
2894
2895 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
2896 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2897 } else if (bs->io_limits.bps[is_write]) {
2898 bps_limit = bs->io_limits.bps[is_write];
2899 } else {
2900 if (wait) {
2901 *wait = 0;
2902 }
2903
2904 return false;
2905 }
2906
2907 slice_time = bs->slice_end - bs->slice_start;
2908 slice_time /= (NANOSECONDS_PER_SECOND);
2909 bytes_limit = bps_limit * slice_time;
2910 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
2911 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
2912 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
2913 }
2914
2915 /* bytes_base: the bytes of data that have already been read/written,
2916 * taken from the accumulated statistics for the current slice.
2917 * bytes_res: the remaining bytes of data that still need to be
2918 * read/written for this request.
2919 * (bytes_base + bytes_res) / bps_limit: the total time needed to
2920 * complete reading/writing all of this data. */
2921 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2922
2923 if (bytes_base + bytes_res <= bytes_limit) {
2924 if (wait) {
2925 *wait = 0;
2926 }
2927
2928 return false;
2929 }
2930
2931 /* Calc approx time to dispatch */
2932 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
2933
2934 /* When the I/O rate at runtime exceeds the limits,
2935 * bs->slice_end needs to be extended so that the statistics for the
2936 * current slice are kept until the timer fires; the amount of the
2937 * extension was tuned experimentally.
2938 */
2939 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
2940 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
2941 if (wait) {
2942 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
2943 }
2944
2945 return true;
2946 }
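
/*
 * Worked example for the calculation above (illustrative numbers): with
 * bps_limit = 1048576 (1 MB/s), bytes_base + bytes_res = 155648 and
 * elapsed_time = 0.05, wait_time = 155648 / 1048576 - 0.05 ~= 0.098s.
 * BLOCK_IO_SLICE_TIME * 10 is one second in nanoseconds, so the
 * multiplication converts the result into the ns value stored in *wait.
 */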
2947
2948 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
2949 double elapsed_time, uint64_t *wait)
2950 {
2951 uint64_t iops_limit = 0;
2952 double ios_limit, ios_base;
2953 double slice_time, wait_time;
2954
2955 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
2956 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2957 } else if (bs->io_limits.iops[is_write]) {
2958 iops_limit = bs->io_limits.iops[is_write];
2959 } else {
2960 if (wait) {
2961 *wait = 0;
2962 }
2963
2964 return false;
2965 }
2966
2967 slice_time = bs->slice_end - bs->slice_start;
2968 slice_time /= (NANOSECONDS_PER_SECOND);
2969 ios_limit = iops_limit * slice_time;
2970 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
2971 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
2972 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
2973 }
2974
2975 if (ios_base + 1 <= ios_limit) {
2976 if (wait) {
2977 *wait = 0;
2978 }
2979
2980 return false;
2981 }
2982
2983 /* Calc approx time to dispatch */
2984 wait_time = (ios_base + 1) / iops_limit;
2985 if (wait_time > elapsed_time) {
2986 wait_time = wait_time - elapsed_time;
2987 } else {
2988 wait_time = 0;
2989 }
2990
2991 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
2992 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
2993 if (wait) {
2994 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
2995 }
2996
2997 return true;
2998 }
2999
3000 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3001 bool is_write, int64_t *wait)
3002 {
3003 int64_t now, max_wait;
3004 uint64_t bps_wait = 0, iops_wait = 0;
3005 double elapsed_time;
3006 int bps_ret, iops_ret;
3007
3008 now = qemu_get_clock_ns(vm_clock);
3009 if ((bs->slice_start < now)
3010 && (bs->slice_end > now)) {
3011 bs->slice_end = now + bs->slice_time;
3012 } else {
3013 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3014 bs->slice_start = now;
3015 bs->slice_end = now + bs->slice_time;
3016
3017 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3018 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3019
3020 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3021 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3022 }
3023
3024 elapsed_time = now - bs->slice_start;
3025 elapsed_time /= (NANOSECONDS_PER_SECOND);
3026
3027 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3028 is_write, elapsed_time, &bps_wait);
3029 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3030 elapsed_time, &iops_wait);
3031 if (bps_ret || iops_ret) {
3032 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3033 if (wait) {
3034 *wait = max_wait;
3035 }
3036
3037 now = qemu_get_clock_ns(vm_clock);
3038 if (bs->slice_end < now + max_wait) {
3039 bs->slice_end = now + max_wait;
3040 }
3041
3042 return true;
3043 }
3044
3045 if (wait) {
3046 *wait = 0;
3047 }
3048
3049 return false;
3050 }
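
/*
 * Illustrative sketch (editor's addition, simplified): how a request is
 * gated on the check above.  The real interception path re-arms the block
 * timer with the computed wait and sleeps on bs->throttled_reqs until the
 * request no longer exceeds the limits.
 */
#if 0
static void coroutine_fn example_throttle(BlockDriverState *bs,
                                          bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait(&bs->throttled_reqs);
    }
}
#endif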
3051
3052 /**************************************************************/
3053 /* async block device emulation */
3054
3055 typedef struct BlockDriverAIOCBSync {
3056 BlockDriverAIOCB common;
3057 QEMUBH *bh;
3058 int ret;
3059 /* vector translation state */
3060 QEMUIOVector *qiov;
3061 uint8_t *bounce;
3062 int is_write;
3063 } BlockDriverAIOCBSync;
3064
3065 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3066 {
3067 BlockDriverAIOCBSync *acb =
3068 container_of(blockacb, BlockDriverAIOCBSync, common);
3069 qemu_bh_delete(acb->bh);
3070 acb->bh = NULL;
3071 qemu_aio_release(acb);
3072 }
3073
3074 static AIOPool bdrv_em_aio_pool = {
3075 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3076 .cancel = bdrv_aio_cancel_em,
3077 };
3078
3079 static void bdrv_aio_bh_cb(void *opaque)
3080 {
3081 BlockDriverAIOCBSync *acb = opaque;
3082
3083 if (!acb->is_write)
3084 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
3085 qemu_vfree(acb->bounce);
3086 acb->common.cb(acb->common.opaque, acb->ret);
3087 qemu_bh_delete(acb->bh);
3088 acb->bh = NULL;
3089 qemu_aio_release(acb);
3090 }
3091
3092 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3093 int64_t sector_num,
3094 QEMUIOVector *qiov,
3095 int nb_sectors,
3096 BlockDriverCompletionFunc *cb,
3097 void *opaque,
3098 int is_write)
3099
3100 {
3101 BlockDriverAIOCBSync *acb;
3102
3103 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
3104 acb->is_write = is_write;
3105 acb->qiov = qiov;
3106 acb->bounce = qemu_blockalign(bs, qiov->size);
3107
3108 if (!acb->bh)
3109 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
3110
3111 if (is_write) {
3112 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
3113 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
3114 } else {
3115 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
3116 }
3117
3118 qemu_bh_schedule(acb->bh);
3119
3120 return &acb->common;
3121 }
3122
3123 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3124 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3125 BlockDriverCompletionFunc *cb, void *opaque)
3126 {
3127 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
3128 }
3129
3130 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3131 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3132 BlockDriverCompletionFunc *cb, void *opaque)
3133 {
3134 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3135 }
3136
3137
3138 typedef struct BlockDriverAIOCBCoroutine {
3139 BlockDriverAIOCB common;
3140 BlockRequest req;
3141 bool is_write;
3142 QEMUBH* bh;
3143 } BlockDriverAIOCBCoroutine;
3144
3145 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3146 {
3147 qemu_aio_flush();
3148 }
3149
3150 static AIOPool bdrv_em_co_aio_pool = {
3151 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3152 .cancel = bdrv_aio_co_cancel_em,
3153 };
3154
3155 static void bdrv_co_em_bh(void *opaque)
3156 {
3157 BlockDriverAIOCBCoroutine *acb = opaque;
3158
3159 acb->common.cb(acb->common.opaque, acb->req.error);
3160 qemu_bh_delete(acb->bh);
3161 qemu_aio_release(acb);
3162 }
3163
3164 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3165 static void coroutine_fn bdrv_co_do_rw(void *opaque)
3166 {
3167 BlockDriverAIOCBCoroutine *acb = opaque;
3168 BlockDriverState *bs = acb->common.bs;
3169
3170 if (!acb->is_write) {
3171 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
3172 acb->req.nb_sectors, acb->req.qiov);
3173 } else {
3174 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
3175 acb->req.nb_sectors, acb->req.qiov);
3176 }
3177
3178 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3179 qemu_bh_schedule(acb->bh);
3180 }
3181
3182 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3183 int64_t sector_num,
3184 QEMUIOVector *qiov,
3185 int nb_sectors,
3186 BlockDriverCompletionFunc *cb,
3187 void *opaque,
3188 bool is_write)
3189 {
3190 Coroutine *co;
3191 BlockDriverAIOCBCoroutine *acb;
3192
3193 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3194 acb->req.sector = sector_num;
3195 acb->req.nb_sectors = nb_sectors;
3196 acb->req.qiov = qiov;
3197 acb->is_write = is_write;
3198
3199 co = qemu_coroutine_create(bdrv_co_do_rw);
3200 qemu_coroutine_enter(co, acb);
3201
3202 return &acb->common;
3203 }
3204
3205 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
3206 {
3207 BlockDriverAIOCBCoroutine *acb = opaque;
3208 BlockDriverState *bs = acb->common.bs;
3209
3210 acb->req.error = bdrv_co_flush(bs);
3211 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3212 qemu_bh_schedule(acb->bh);
3213 }
3214
3215 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
3216 BlockDriverCompletionFunc *cb, void *opaque)
3217 {
3218 trace_bdrv_aio_flush(bs, opaque);
3219
3220 Coroutine *co;
3221 BlockDriverAIOCBCoroutine *acb;
3222
3223 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3224 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3225 qemu_coroutine_enter(co, acb);
3226
3227 return &acb->common;
3228 }
3229
3230 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3231 {
3232 BlockDriverAIOCBCoroutine *acb = opaque;
3233 BlockDriverState *bs = acb->common.bs;
3234
3235 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3236 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3237 qemu_bh_schedule(acb->bh);
3238 }
3239
3240 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3241 int64_t sector_num, int nb_sectors,
3242 BlockDriverCompletionFunc *cb, void *opaque)
3243 {
3244 Coroutine *co;
3245 BlockDriverAIOCBCoroutine *acb;
3246
3247 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3248
3249 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3250 acb->req.sector = sector_num;
3251 acb->req.nb_sectors = nb_sectors;
3252 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3253 qemu_coroutine_enter(co, acb);
3254
3255 return &acb->common;
3256 }
3257
3258 void bdrv_init(void)
3259 {
3260 module_call_init(MODULE_INIT_BLOCK);
3261 }
3262
3263 void bdrv_init_with_whitelist(void)
3264 {
3265 use_bdrv_whitelist = 1;
3266 bdrv_init();
3267 }
3268
3269 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3270 BlockDriverCompletionFunc *cb, void *opaque)
3271 {
3272 BlockDriverAIOCB *acb;
3273
3274 if (pool->free_aiocb) {
3275 acb = pool->free_aiocb;
3276 pool->free_aiocb = acb->next;
3277 } else {
3278 acb = g_malloc0(pool->aiocb_size);
3279 acb->pool = pool;
3280 }
3281 acb->bs = bs;
3282 acb->cb = cb;
3283 acb->opaque = opaque;
3284 return acb;
3285 }
3286
3287 void qemu_aio_release(void *p)
3288 {
3289 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3290 AIOPool *pool = acb->pool;
3291 acb->next = pool->free_aiocb;
3292 pool->free_aiocb = acb;
3293 }
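
/* Note on the pool design: AIOCBs are recycled through a per-pool free
 * list rather than freed, so once warmed up, qemu_aio_get() on the I/O
 * fast path is a single pointer pop and the allocator is only hit when
 * the pool grows. */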
3294
3295 /**************************************************************/
3296 /* Coroutine block device emulation */
3297
3298 typedef struct CoroutineIOCompletion {
3299 Coroutine *coroutine;
3300 int ret;
3301 } CoroutineIOCompletion;
3302
3303 static void bdrv_co_io_em_complete(void *opaque, int ret)
3304 {
3305 CoroutineIOCompletion *co = opaque;
3306
3307 co->ret = ret;
3308 qemu_coroutine_enter(co->coroutine, NULL);
3309 }
3310
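/* Emulate coroutine-style I/O on top of a driver's callback-based AIO
 * interface: submit the request, yield this coroutine, and let
 * bdrv_co_io_em_complete() above re-enter it with the result. */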
3311 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3312 int nb_sectors, QEMUIOVector *iov,
3313 bool is_write)
3314 {
3315 CoroutineIOCompletion co = {
3316 .coroutine = qemu_coroutine_self(),
3317 };
3318 BlockDriverAIOCB *acb;
3319
3320 if (is_write) {
3321 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3322 bdrv_co_io_em_complete, &co);
3323 } else {
3324 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3325 bdrv_co_io_em_complete, &co);
3326 }
3327
3328 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
3329 if (!acb) {
3330 return -EIO;
3331 }
3332 qemu_coroutine_yield();
3333
3334 return co.ret;
3335 }
3336
3337 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3338 int64_t sector_num, int nb_sectors,
3339 QEMUIOVector *iov)
3340 {
3341 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3342 }
3343
3344 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3345 int64_t sector_num, int nb_sectors,
3346 QEMUIOVector *iov)
3347 {
3348 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3349 }
3350
3351 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
3352 {
3353 RwCo *rwco = opaque;
3354
3355 rwco->ret = bdrv_co_flush(rwco->bs);
3356 }
3357
3358 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3359 {
3360 int ret;
3361
3362 if (!bs->drv) {
3363 return 0;
3364 }
3365
3366 /* Write back cached data to the OS even with cache=unsafe */
3367 if (bs->drv->bdrv_co_flush_to_os) {
3368 ret = bs->drv->bdrv_co_flush_to_os(bs);
3369 if (ret < 0) {
3370 return ret;
3371 }
3372 }
3373
3374 /* But don't actually force it to the disk with cache=unsafe */
3375 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3376 return 0;
3377 }
3378
3379 if (bs->drv->bdrv_co_flush_to_disk) {
3380 return bs->drv->bdrv_co_flush_to_disk(bs);
3381 } else if (bs->drv->bdrv_aio_flush) {
3382 BlockDriverAIOCB *acb;
3383 CoroutineIOCompletion co = {
3384 .coroutine = qemu_coroutine_self(),
3385 };
3386
3387 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3388 if (acb == NULL) {
3389 return -EIO;
3390 } else {
3391 qemu_coroutine_yield();
3392 return co.ret;
3393 }
3394 } else {
3395 /*
3396 * Some block drivers always operate in either writethrough or unsafe
3397 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3398 * know how the server works (because the behaviour is hardcoded or
3399 * depends on server-side configuration), so we can't ensure that
3400 * everything is safe on disk. Returning an error doesn't work because
3401 * that would break guests even if the server operates in writethrough
3402 * mode.
3403 *
3404 * Let's hope the user knows what he's doing.
3405 */
3406 return 0;
3407 }
3408 }
3409
3410 void bdrv_invalidate_cache(BlockDriverState *bs)
3411 {
3412 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3413 bs->drv->bdrv_invalidate_cache(bs);
3414 }
3415 }
3416
3417 void bdrv_invalidate_cache_all(void)
3418 {
3419 BlockDriverState *bs;
3420
3421 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3422 bdrv_invalidate_cache(bs);
3423 }
3424 }
3425
3426 int bdrv_flush(BlockDriverState *bs)
3427 {
3428 Coroutine *co;
3429 RwCo rwco = {
3430 .bs = bs,
3431 .ret = NOT_DONE,
3432 };
3433
3434 if (qemu_in_coroutine()) {
3435 /* Fast-path if already in coroutine context */
3436 bdrv_flush_co_entry(&rwco);
3437 } else {
3438 co = qemu_coroutine_create(bdrv_flush_co_entry);
3439 qemu_coroutine_enter(co, &rwco);
3440 while (rwco.ret == NOT_DONE) {
3441 qemu_aio_wait();
3442 }
3443 }
3444
3445 return rwco.ret;
3446 }
3447
3448 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3449 {
3450 RwCo *rwco = opaque;
3451
3452 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3453 }
3454
3455 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3456 int nb_sectors)
3457 {
3458 if (!bs->drv) {
3459 return -ENOMEDIUM;
3460 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3461 return -EIO;
3462 } else if (bs->read_only) {
3463 return -EROFS;
3464 } else if (bs->drv->bdrv_co_discard) {
3465 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3466 } else if (bs->drv->bdrv_aio_discard) {
3467 BlockDriverAIOCB *acb;
3468 CoroutineIOCompletion co = {
3469 .coroutine = qemu_coroutine_self(),
3470 };
3471
3472 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3473 bdrv_co_io_em_complete, &co);
3474 if (acb == NULL) {
3475 return -EIO;
3476 } else {
3477 qemu_coroutine_yield();
3478 return co.ret;
3479 }
3480 } else {
3481 return 0;
3482 }
3483 }
3484
3485 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3486 {
3487 Coroutine *co;
3488 RwCo rwco = {
3489 .bs = bs,
3490 .sector_num = sector_num,
3491 .nb_sectors = nb_sectors,
3492 .ret = NOT_DONE,
3493 };
3494
3495 if (qemu_in_coroutine()) {
3496 /* Fast-path if already in coroutine context */
3497 bdrv_discard_co_entry(&rwco);
3498 } else {
3499 co = qemu_coroutine_create(bdrv_discard_co_entry);
3500 qemu_coroutine_enter(co, &rwco);
3501 while (rwco.ret == NOT_DONE) {
3502 qemu_aio_wait();
3503 }
3504 }
3505
3506 return rwco.ret;
3507 }
3508
3509 /**************************************************************/
3510 /* removable device support */
3511
3512 /**
3513 * Return TRUE if the media is present
3514 */
3515 int bdrv_is_inserted(BlockDriverState *bs)
3516 {
3517 BlockDriver *drv = bs->drv;
3518
3519 if (!drv)
3520 return 0;
3521 if (!drv->bdrv_is_inserted)
3522 return 1;
3523 return drv->bdrv_is_inserted(bs);
3524 }
3525
3526 /**
3527 * Return whether the media changed since the last call to this
3528 * function, or -ENOTSUP if we don't know. Most drivers don't know.
3529 */
3530 int bdrv_media_changed(BlockDriverState *bs)
3531 {
3532 BlockDriver *drv = bs->drv;
3533
3534 if (drv && drv->bdrv_media_changed) {
3535 return drv->bdrv_media_changed(bs);
3536 }
3537 return -ENOTSUP;
3538 }
3539
3540 /**
3541 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3542 */
3543 void bdrv_eject(BlockDriverState *bs, int eject_flag)
3544 {
3545 BlockDriver *drv = bs->drv;
3546
3547 if (drv && drv->bdrv_eject) {
3548 drv->bdrv_eject(bs, eject_flag);
3549 }
3550 }
3551
3552 /**
3553 * Lock or unlock the media (if it is locked, the user won't be able
3554 * to eject it manually).
3555 */
3556 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
3557 {
3558 BlockDriver *drv = bs->drv;
3559
3560 trace_bdrv_lock_medium(bs, locked);
3561
3562 if (drv && drv->bdrv_lock_medium) {
3563 drv->bdrv_lock_medium(bs, locked);
3564 }
3565 }
3566
3567 /* needed for generic scsi interface */
3568
3569 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3570 {
3571 BlockDriver *drv = bs->drv;
3572
3573 if (drv && drv->bdrv_ioctl)
3574 return drv->bdrv_ioctl(bs, req, buf);
3575 return -ENOTSUP;
3576 }
3577
3578 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3579 unsigned long int req, void *buf,
3580 BlockDriverCompletionFunc *cb, void *opaque)
3581 {
3582 BlockDriver *drv = bs->drv;
3583
3584 if (drv && drv->bdrv_aio_ioctl)
3585 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3586 return NULL;
3587 }
3588
3589 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3590 {
3591 bs->buffer_alignment = align;
3592 }
3593
3594 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3595 {
3596 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
3597 }
3598
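/* The dirty bitmap uses one bit per BDRV_SECTORS_PER_DIRTY_CHUNK sectors.
 * Illustrative sizing, assuming the usual 2048-sector (1 MiB) chunk: a
 * 1 TiB image has 2^31 sectors and therefore 2^20 chunks, so the bitmap
 * below rounds up to 2^17 bytes = 128 KiB. */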
3599 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3600 {
3601 int64_t bitmap_size;
3602
3603 bs->dirty_count = 0;
3604 if (enable) {
3605 if (!bs->dirty_bitmap) {
3606 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
3607 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
3608 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
3609
3610 bs->dirty_bitmap = g_malloc0(bitmap_size);
3611 }
3612 } else {
3613 if (bs->dirty_bitmap) {
3614 g_free(bs->dirty_bitmap);
3615 bs->dirty_bitmap = NULL;
3616 }
3617 }
3618 }
3619
3620 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
3621 {
3622 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
3623
3624 if (bs->dirty_bitmap &&
3625 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
3626 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
3627 (1UL << (chunk % (sizeof(unsigned long) * 8))));
3628 } else {
3629 return 0;
3630 }
3631 }
3632
3633 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
3634 int nr_sectors)
3635 {
3636 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
3637 }
3638
3639 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
3640 {
3641 return bs->dirty_count;
3642 }
3643
3644 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
3645 {
3646 assert(bs->in_use != in_use);
3647 bs->in_use = in_use;
3648 }
3649
3650 int bdrv_in_use(BlockDriverState *bs)
3651 {
3652 return bs->in_use;
3653 }
3654
3655 void bdrv_iostatus_enable(BlockDriverState *bs)
3656 {
3657 bs->iostatus_enabled = true;
3658 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3659 }
3660
3661 /* The I/O status is only enabled if the drive explicitly
3662 * enables it _and_ the VM is configured to stop on errors */
3663 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
3664 {
3665 return (bs->iostatus_enabled &&
3666 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
3667 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
3668 bs->on_read_error == BLOCK_ERR_STOP_ANY));
3669 }
3670
3671 void bdrv_iostatus_disable(BlockDriverState *bs)
3672 {
3673 bs->iostatus_enabled = false;
3674 }
3675
3676 void bdrv_iostatus_reset(BlockDriverState *bs)
3677 {
3678 if (bdrv_iostatus_is_enabled(bs)) {
3679 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3680 }
3681 }
3682
3683 /* XXX: Today this is set by device models because it makes the implementation
3684 quite simple. However, the block layer knows about the error, so it's
3685 possible to implement this without device models being involved */
3686 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
3687 {
3688 if (bdrv_iostatus_is_enabled(bs) &&
3689 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
3690 assert(error >= 0);
3691 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
3692 BLOCK_DEVICE_IO_STATUS_FAILED;
3693 }
3694 }
3695
3696 void
3697 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
3698 enum BlockAcctType type)
3699 {
3700 assert(type < BDRV_MAX_IOTYPE);
3701
3702 cookie->bytes = bytes;
3703 cookie->start_time_ns = get_clock();
3704 cookie->type = type;
3705 }
3706
3707 void
3708 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
3709 {
3710 assert(cookie->type < BDRV_MAX_IOTYPE);
3711
3712 bs->nr_bytes[cookie->type] += cookie->bytes;
3713 bs->nr_ops[cookie->type]++;
3714 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
3715 }
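
/*
 * Illustrative sketch (editor's addition): a device model brackets each
 * request with bdrv_acct_start()/bdrv_acct_done() so qmp_query_blockstat()
 * can report byte counts, operation counts and total service time.  The
 * ExampleReq type and names are hypothetical; the cookie must stay alive
 * until completion.
 */
#if 0
typedef struct ExampleReq {
    BlockDriverState *bs;
    BlockAcctCookie acct;
} ExampleReq;

static void example_read_done(void *opaque, int ret)
{
    ExampleReq *req = opaque;

    bdrv_acct_done(req->bs, &req->acct);
    g_free(req);
}

static void example_read(BlockDriverState *bs, int64_t sector_num,
                         QEMUIOVector *qiov)
{
    ExampleReq *req = g_malloc0(sizeof(*req));

    req->bs = bs;
    bdrv_acct_start(bs, &req->acct, qiov->size, BDRV_ACCT_READ);
    bdrv_aio_readv(bs, sector_num, qiov, qiov->size >> 9,
                   example_read_done, req);
}
#endif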
3716
3717 int bdrv_img_create(const char *filename, const char *fmt,
3718 const char *base_filename, const char *base_fmt,
3719 char *options, uint64_t img_size, int flags)
3720 {
3721 QEMUOptionParameter *param = NULL, *create_options = NULL;
3722 QEMUOptionParameter *backing_fmt, *backing_file, *size;
3723 BlockDriverState *bs = NULL;
3724 BlockDriver *drv, *proto_drv;
3725 BlockDriver *backing_drv = NULL;
3726 int ret = 0;
3727
3728 /* Find driver and parse its options */
3729 drv = bdrv_find_format(fmt);
3730 if (!drv) {
3731 error_report("Unknown file format '%s'", fmt);
3732 ret = -EINVAL;
3733 goto out;
3734 }
3735
3736 proto_drv = bdrv_find_protocol(filename);
3737 if (!proto_drv) {
3738 error_report("Unknown protocol '%s'", filename);
3739 ret = -EINVAL;
3740 goto out;
3741 }
3742
3743 create_options = append_option_parameters(create_options,
3744 drv->create_options);
3745 create_options = append_option_parameters(create_options,
3746 proto_drv->create_options);
3747
3748 /* Create parameter list with default values */
3749 param = parse_option_parameters("", create_options, param);
3750
3751 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
3752
3753 /* Parse -o options */
3754 if (options) {
3755 param = parse_option_parameters(options, create_options, param);
3756 if (param == NULL) {
3757 error_report("Invalid options for file format '%s'.", fmt);
3758 ret = -EINVAL;
3759 goto out;
3760 }
3761 }
3762
3763 if (base_filename) {
3764 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
3765 base_filename)) {
3766 error_report("Backing file not supported for file format '%s'",
3767 fmt);
3768 ret = -EINVAL;
3769 goto out;
3770 }
3771 }
3772
3773 if (base_fmt) {
3774 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
3775 error_report("Backing file format not supported for file "
3776 "format '%s'", fmt);
3777 ret = -EINVAL;
3778 goto out;
3779 }
3780 }
3781
3782 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
3783 if (backing_file && backing_file->value.s) {
3784 if (!strcmp(filename, backing_file->value.s)) {
3785 error_report("Trying to create an image with the "
3786 "same filename as the backing file");
3787 ret = -EINVAL;
3788 goto out;
3789 }
3790 }
3791
3792 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
3793 if (backing_fmt && backing_fmt->value.s) {
3794 backing_drv = bdrv_find_format(backing_fmt->value.s);
3795 if (!backing_drv) {
3796 error_report("Unknown backing file format '%s'",
3797 backing_fmt->value.s);
3798 ret = -EINVAL;
3799 goto out;
3800 }
3801 }
3802
3803 // The size for the image must always be specified, with one exception:
3804 // If we are using a backing file, we can obtain the size from there
3805 size = get_option_parameter(param, BLOCK_OPT_SIZE);
3806 if (size && size->value.n == -1) {
3807 if (backing_file && backing_file->value.s) {
3808 uint64_t size;
3809 char buf[32];
3810
3811 bs = bdrv_new("");
3812
3813 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);
3814 if (ret < 0) {
3815 error_report("Could not open '%s'", backing_file->value.s);
3816 goto out;
3817 }
3818 bdrv_get_geometry(bs, &size);
3819 size *= 512;
3820
3821 snprintf(buf, sizeof(buf), "%" PRId64, size);
3822 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
3823 } else {
3824 error_report("Image creation needs a size parameter");
3825 ret = -EINVAL;
3826 goto out;
3827 }
3828 }
3829
3830 printf("Formatting '%s', fmt=%s ", filename, fmt);
3831 print_option_parameters(param);
3832 puts("");
3833
3834 ret = bdrv_create(drv, filename, param);
3835
3836 if (ret < 0) {
3837 if (ret == -ENOTSUP) {
3838 error_report("Formatting or formatting option not supported for "
3839 "file format '%s'", fmt);
3840 } else if (ret == -EFBIG) {
3841 error_report("The image size is too large for file format '%s'",
3842 fmt);
3843 } else {
3844 error_report("%s: error while creating %s: %s", filename, fmt,
3845 strerror(-ret));
3846 }
3847 }
3848
3849 out:
3850 free_option_parameters(create_options);
3851 free_option_parameters(param);
3852
3853 if (bs) {
3854 bdrv_delete(bs);
3855 }
3856
3857 return ret;
3858 }
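
/*
 * Illustrative sketch (editor's addition): creating a 10 GiB qcow2 overlay
 * on top of a raw base image, roughly what qemu-img create does when given
 * a backing file.  The file names are made up.
 */
#if 0
static int example_create_overlay(void)
{
    return bdrv_img_create("overlay.qcow2", "qcow2",
                           "base.img", "raw",
                           NULL /* no extra -o options */,
                           (uint64_t)10 << 30 /* 10 GiB */, 0 /* flags */);
}
#endif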