1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor.h"
28 #include "block_int.h"
29 #include "module.h"
30 #include "qjson.h"
31 #include "qemu-coroutine.h"
32 #include "qmp-commands.h"
33 #include "qemu-timer.h"
34
35 #ifdef CONFIG_BSD
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/ioctl.h>
39 #include <sys/queue.h>
40 #ifndef __DragonFly__
41 #include <sys/disk.h>
42 #endif
43 #endif
44
45 #ifdef _WIN32
46 #include <windows.h>
47 #endif
48
49 #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
50
51 typedef enum {
52 BDRV_REQ_COPY_ON_READ = 0x1,
53 BDRV_REQ_ZERO_WRITE = 0x2,
54 } BdrvRequestFlags;
55
56 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
57 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
58 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
59 BlockDriverCompletionFunc *cb, void *opaque);
60 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
61 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
62 BlockDriverCompletionFunc *cb, void *opaque);
63 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
64 int64_t sector_num, int nb_sectors,
65 QEMUIOVector *iov);
66 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
67 int64_t sector_num, int nb_sectors,
68 QEMUIOVector *iov);
69 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
70 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
71 BdrvRequestFlags flags);
72 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
73 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
74 BdrvRequestFlags flags);
75 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
76 int64_t sector_num,
77 QEMUIOVector *qiov,
78 int nb_sectors,
79 BlockDriverCompletionFunc *cb,
80 void *opaque,
81 bool is_write);
82 static void coroutine_fn bdrv_co_do_rw(void *opaque);
83
84 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
85 bool is_write, double elapsed_time, uint64_t *wait);
86 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
87 double elapsed_time, uint64_t *wait);
88 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
89 bool is_write, int64_t *wait);
90
91 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
92 QTAILQ_HEAD_INITIALIZER(bdrv_states);
93
94 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
95 QLIST_HEAD_INITIALIZER(bdrv_drivers);
96
97 /* The device to use for VM snapshots */
98 static BlockDriverState *bs_snapshots;
99
100 /* If non-zero, use only whitelisted block drivers */
101 static int use_bdrv_whitelist;
102
103 #ifdef _WIN32
104 static int is_windows_drive_prefix(const char *filename)
105 {
106 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
107 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
108 filename[1] == ':');
109 }
110
111 int is_windows_drive(const char *filename)
112 {
113 if (is_windows_drive_prefix(filename) &&
114 filename[2] == '\0')
115 return 1;
116 if (strstart(filename, "\\\\.\\", NULL) ||
117 strstart(filename, "//./", NULL))
118 return 1;
119 return 0;
120 }
121 #endif
122
123 /* throttling disk I/O limits */
124 void bdrv_io_limits_disable(BlockDriverState *bs)
125 {
126 bs->io_limits_enabled = false;
127
128 while (qemu_co_queue_next(&bs->throttled_reqs));
129
130 if (bs->block_timer) {
131 qemu_del_timer(bs->block_timer);
132 qemu_free_timer(bs->block_timer);
133 bs->block_timer = NULL;
134 }
135
136 bs->slice_start = 0;
137 bs->slice_end = 0;
138 bs->slice_time = 0;
139 memset(&bs->io_base, 0, sizeof(bs->io_base));
140 }
141
142 static void bdrv_block_timer(void *opaque)
143 {
144 BlockDriverState *bs = opaque;
145
146 qemu_co_queue_next(&bs->throttled_reqs);
147 }
148
149 void bdrv_io_limits_enable(BlockDriverState *bs)
150 {
151 qemu_co_queue_init(&bs->throttled_reqs);
152 bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
153 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
154 bs->slice_start = qemu_get_clock_ns(vm_clock);
155 bs->slice_end = bs->slice_start + bs->slice_time;
156 memset(&bs->io_base, 0, sizeof(bs->io_base));
157 bs->io_limits_enabled = true;
158 }
159
160 bool bdrv_io_limits_enabled(BlockDriverState *bs)
161 {
162 BlockIOLimit *io_limits = &bs->io_limits;
163 return io_limits->bps[BLOCK_IO_LIMIT_READ]
164 || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
165 || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
166 || io_limits->iops[BLOCK_IO_LIMIT_READ]
167 || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
168 || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
169 }
170
171 static void bdrv_io_limits_intercept(BlockDriverState *bs,
172 bool is_write, int nb_sectors)
173 {
174 int64_t wait_time = -1;
175
176 if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
177 qemu_co_queue_wait(&bs->throttled_reqs);
178 }
179
180     /* We aim to preserve each request's ordering, i.e. FIFO. The next
181      * throttled request will not be dequeued until the current request has
182      * been allowed to be serviced. So if the current request still exceeds
183      * the limits, it is re-inserted at the head of the queue, and all
184      * requests that follow it remain in the throttled_reqs queue.
185      */
186
187 while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
188 qemu_mod_timer(bs->block_timer,
189 wait_time + qemu_get_clock_ns(vm_clock));
190 qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
191 }
192
193 qemu_co_queue_next(&bs->throttled_reqs);
194 }
195
196 /* check if the path starts with "<protocol>:" */
197 static int path_has_protocol(const char *path)
198 {
199 #ifdef _WIN32
200 if (is_windows_drive(path) ||
201 is_windows_drive_prefix(path)) {
202 return 0;
203 }
204 #endif
205
206 return strchr(path, ':') != NULL;
207 }
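
/*
 * A minimal sketch of how path_has_protocol() classifies paths; the sample
 * strings are illustrative only. "nbd:localhost:10809" carries a
 * "<protocol>:" prefix, while a plain POSIX path does not (and on Windows,
 * drive letters such as "c:" are deliberately not treated as protocols).
 */
static void example_path_has_protocol(void)
{
    assert(path_has_protocol("nbd:localhost:10809"));        /* has prefix */
    assert(!path_has_protocol("/var/lib/images/disk.img"));  /* plain path */
}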
208
209 int path_is_absolute(const char *path)
210 {
211 const char *p;
212 #ifdef _WIN32
213 /* specific case for names like: "\\.\d:" */
214 if (*path == '/' || *path == '\\')
215 return 1;
216 #endif
217 p = strchr(path, ':');
218 if (p)
219 p++;
220 else
221 p = path;
222 #ifdef _WIN32
223 return (*p == '/' || *p == '\\');
224 #else
225 return (*p == '/');
226 #endif
227 }
228
229 /* if filename is absolute, just copy it to dest. Otherwise, build a
230    path to it by treating it as relative to base_path. URLs are
231    supported. */
232 void path_combine(char *dest, int dest_size,
233 const char *base_path,
234 const char *filename)
235 {
236 const char *p, *p1;
237 int len;
238
239 if (dest_size <= 0)
240 return;
241 if (path_is_absolute(filename)) {
242 pstrcpy(dest, dest_size, filename);
243 } else {
244 p = strchr(base_path, ':');
245 if (p)
246 p++;
247 else
248 p = base_path;
249 p1 = strrchr(base_path, '/');
250 #ifdef _WIN32
251 {
252 const char *p2;
253 p2 = strrchr(base_path, '\\');
254 if (!p1 || p2 > p1)
255 p1 = p2;
256 }
257 #endif
258 if (p1)
259 p1++;
260 else
261 p1 = base_path;
262 if (p1 > p)
263 p = p1;
264 len = p - base_path;
265 if (len > dest_size - 1)
266 len = dest_size - 1;
267 memcpy(dest, base_path, len);
268 dest[len] = '\0';
269 pstrcat(dest, dest_size, filename);
270 }
271 }
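
/*
 * A worked example for path_combine(), assuming a POSIX host; the file
 * names are made up for illustration.
 */
static void example_path_combine(void)
{
    char dest[PATH_MAX];

    /* relative name: resolved against the directory of the base path */
    path_combine(dest, sizeof(dest), "/images/base.qcow2", "backing.qcow2");
    /* dest is now "/images/backing.qcow2" */

    /* absolute name: copied through unchanged */
    path_combine(dest, sizeof(dest), "/images/base.qcow2", "/other/abs.img");
    /* dest is now "/other/abs.img" */
}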
272
273 void bdrv_register(BlockDriver *bdrv)
274 {
275 /* Block drivers without coroutine functions need emulation */
276 if (!bdrv->bdrv_co_readv) {
277 bdrv->bdrv_co_readv = bdrv_co_readv_em;
278 bdrv->bdrv_co_writev = bdrv_co_writev_em;
279
280         /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
281 * the block driver lacks aio we need to emulate that too.
282 */
283 if (!bdrv->bdrv_aio_readv) {
284 /* add AIO emulation layer */
285 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
286 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
287 }
288 }
289
290 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
291 }
292
293 /* create a new block device (by default it is empty) */
294 BlockDriverState *bdrv_new(const char *device_name)
295 {
296 BlockDriverState *bs;
297
298 bs = g_malloc0(sizeof(BlockDriverState));
299 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
300 if (device_name[0] != '\0') {
301 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
302 }
303 bdrv_iostatus_disable(bs);
304 return bs;
305 }
306
307 BlockDriver *bdrv_find_format(const char *format_name)
308 {
309 BlockDriver *drv1;
310 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
311 if (!strcmp(drv1->format_name, format_name)) {
312 return drv1;
313 }
314 }
315 return NULL;
316 }
317
318 static int bdrv_is_whitelisted(BlockDriver *drv)
319 {
320 static const char *whitelist[] = {
321 CONFIG_BDRV_WHITELIST
322 };
323 const char **p;
324
325 if (!whitelist[0])
326 return 1; /* no whitelist, anything goes */
327
328 for (p = whitelist; *p; p++) {
329 if (!strcmp(drv->format_name, *p)) {
330 return 1;
331 }
332 }
333 return 0;
334 }
335
336 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
337 {
338 BlockDriver *drv = bdrv_find_format(format_name);
339 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
340 }
341
342 int bdrv_create(BlockDriver *drv, const char* filename,
343 QEMUOptionParameter *options)
344 {
345 if (!drv->bdrv_create)
346 return -ENOTSUP;
347
348 return drv->bdrv_create(filename, options);
349 }
350
351 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
352 {
353 BlockDriver *drv;
354
355 drv = bdrv_find_protocol(filename);
356 if (drv == NULL) {
357 return -ENOENT;
358 }
359
360 return bdrv_create(drv, filename, options);
361 }
362
363 #ifdef _WIN32
364 void get_tmp_filename(char *filename, int size)
365 {
366 char temp_dir[MAX_PATH];
367
368 GetTempPath(MAX_PATH, temp_dir);
369 GetTempFileName(temp_dir, "qem", 0, filename);
370 }
371 #else
372 void get_tmp_filename(char *filename, int size)
373 {
374 int fd;
375 const char *tmpdir;
376 /* XXX: race condition possible */
377 tmpdir = getenv("TMPDIR");
378 if (!tmpdir)
379 tmpdir = "/tmp";
380 snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
381 fd = mkstemp(filename);
382 close(fd);
383 }
384 #endif
385
386 /*
387 * Detect host devices. By convention, /dev/cdrom[N] is always
388 * recognized as a host CDROM.
389 */
390 static BlockDriver *find_hdev_driver(const char *filename)
391 {
392 int score_max = 0, score;
393 BlockDriver *drv = NULL, *d;
394
395 QLIST_FOREACH(d, &bdrv_drivers, list) {
396 if (d->bdrv_probe_device) {
397 score = d->bdrv_probe_device(filename);
398 if (score > score_max) {
399 score_max = score;
400 drv = d;
401 }
402 }
403 }
404
405 return drv;
406 }
407
408 BlockDriver *bdrv_find_protocol(const char *filename)
409 {
410 BlockDriver *drv1;
411 char protocol[128];
412 int len;
413 const char *p;
414
415 /* TODO Drivers without bdrv_file_open must be specified explicitly */
416
417 /*
418 * XXX(hch): we really should not let host device detection
419 * override an explicit protocol specification, but moving this
420 * later breaks access to device names with colons in them.
421 * Thanks to the brain-dead persistent naming schemes on udev-
422 * based Linux systems those actually are quite common.
423 */
424 drv1 = find_hdev_driver(filename);
425 if (drv1) {
426 return drv1;
427 }
428
429 if (!path_has_protocol(filename)) {
430 return bdrv_find_format("file");
431 }
432 p = strchr(filename, ':');
433 assert(p != NULL);
434 len = p - filename;
435 if (len > sizeof(protocol) - 1)
436 len = sizeof(protocol) - 1;
437 memcpy(protocol, filename, len);
438 protocol[len] = '\0';
439 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
440 if (drv1->protocol_name &&
441 !strcmp(drv1->protocol_name, protocol)) {
442 return drv1;
443 }
444 }
445 return NULL;
446 }
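
/*
 * A short sketch of the lookup above; which drivers resolve depends on what
 * was registered, so the results below are only the expected outcomes.
 */
static void example_find_protocol(void)
{
    BlockDriver *drv;

    /* no "<protocol>:" prefix, so this falls back to the "file" driver */
    drv = bdrv_find_protocol("/images/disk.img");

    /* matches the driver whose protocol_name is "nbd", if registered */
    drv = bdrv_find_protocol("nbd:localhost:10809");
    (void)drv;
}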
447
448 static int find_image_format(const char *filename, BlockDriver **pdrv)
449 {
450 int ret, score, score_max;
451 BlockDriver *drv1, *drv;
452 uint8_t buf[2048];
453 BlockDriverState *bs;
454
455 ret = bdrv_file_open(&bs, filename, 0);
456 if (ret < 0) {
457 *pdrv = NULL;
458 return ret;
459 }
460
461     /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
462 if (bs->sg || !bdrv_is_inserted(bs)) {
463 bdrv_delete(bs);
464 drv = bdrv_find_format("raw");
465 if (!drv) {
466 ret = -ENOENT;
467 }
468 *pdrv = drv;
469 return ret;
470 }
471
472 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
473 bdrv_delete(bs);
474 if (ret < 0) {
475 *pdrv = NULL;
476 return ret;
477 }
478
479 score_max = 0;
480 drv = NULL;
481 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
482 if (drv1->bdrv_probe) {
483 score = drv1->bdrv_probe(buf, ret, filename);
484 if (score > score_max) {
485 score_max = score;
486 drv = drv1;
487 }
488 }
489 }
490 if (!drv) {
491 ret = -ENOENT;
492 }
493 *pdrv = drv;
494 return ret;
495 }
496
497 /**
498 * Set the current 'total_sectors' value
499 */
500 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
501 {
502 BlockDriver *drv = bs->drv;
503
504 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
505 if (bs->sg)
506 return 0;
507
508 /* query actual device if possible, otherwise just trust the hint */
509 if (drv->bdrv_getlength) {
510 int64_t length = drv->bdrv_getlength(bs);
511 if (length < 0) {
512 return length;
513 }
514 hint = length >> BDRV_SECTOR_BITS;
515 }
516
517 bs->total_sectors = hint;
518 return 0;
519 }
520
521 /**
522 * Set open flags for a given cache mode
523 *
524 * Return 0 on success, -1 if the cache mode was invalid.
525 */
526 int bdrv_parse_cache_flags(const char *mode, int *flags)
527 {
528 *flags &= ~BDRV_O_CACHE_MASK;
529
530 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
531 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
532 } else if (!strcmp(mode, "directsync")) {
533 *flags |= BDRV_O_NOCACHE;
534 } else if (!strcmp(mode, "writeback")) {
535 *flags |= BDRV_O_CACHE_WB;
536 } else if (!strcmp(mode, "unsafe")) {
537 *flags |= BDRV_O_CACHE_WB;
538 *flags |= BDRV_O_NO_FLUSH;
539 } else if (!strcmp(mode, "writethrough")) {
540 /* this is the default */
541 } else {
542 return -1;
543 }
544
545 return 0;
546 }
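
/*
 * A minimal sketch of translating a user-supplied cache mode into open
 * flags before opening an image; "writeback" is just an example value.
 */
static int example_parse_cache_mode(const char *mode)
{
    int flags = BDRV_O_RDWR;

    if (bdrv_parse_cache_flags(mode, &flags) < 0) {
        return -1; /* unknown cache mode string */
    }
    /* for "writeback", BDRV_O_CACHE_WB is now set in addition to
     * BDRV_O_RDWR; the old BDRV_O_CACHE_MASK bits were cleared first */
    return flags;
}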
547
548 /**
549  * The copy-on-read flag is actually a reference count, so multiple users
550  * may use the feature without worrying about clobbering its previous
551  * state. Copy-on-read stays enabled until every user has disabled it.
552 */
553 void bdrv_enable_copy_on_read(BlockDriverState *bs)
554 {
555 bs->copy_on_read++;
556 }
557
558 void bdrv_disable_copy_on_read(BlockDriverState *bs)
559 {
560 assert(bs->copy_on_read > 0);
561 bs->copy_on_read--;
562 }
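
/*
 * Because copy-on-read is reference counted, enable/disable calls must be
 * balanced. A sketch of a user that needs the feature only temporarily:
 */
static void example_copy_on_read_user(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);   /* refcount n -> n + 1 */
    /* ... issue requests that rely on copy-on-read ... */
    bdrv_disable_copy_on_read(bs);  /* refcount n + 1 -> n */
}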
563
564 /*
565 * Common part for opening disk images and files
566 */
567 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
568 int flags, BlockDriver *drv)
569 {
570 int ret, open_flags;
571
572 assert(drv != NULL);
573
574 trace_bdrv_open_common(bs, filename, flags, drv->format_name);
575
576 bs->file = NULL;
577 bs->total_sectors = 0;
578 bs->encrypted = 0;
579 bs->valid_key = 0;
580 bs->sg = 0;
581 bs->open_flags = flags;
582 bs->growable = 0;
583 bs->buffer_alignment = 512;
584
585 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
586 if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
587 bdrv_enable_copy_on_read(bs);
588 }
589
590 pstrcpy(bs->filename, sizeof(bs->filename), filename);
591 bs->backing_file[0] = '\0';
592
593 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
594 return -ENOTSUP;
595 }
596
597 bs->drv = drv;
598 bs->opaque = g_malloc0(drv->instance_size);
599
600 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
601
602 /*
603 * Clear flags that are internal to the block layer before opening the
604 * image.
605 */
606 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
607
608 /*
609 * Snapshots should be writable.
610 */
611 if (bs->is_temporary) {
612 open_flags |= BDRV_O_RDWR;
613 }
614
615 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
616
617 /* Open the image, either directly or using a protocol */
618 if (drv->bdrv_file_open) {
619 ret = drv->bdrv_file_open(bs, filename, open_flags);
620 } else {
621 ret = bdrv_file_open(&bs->file, filename, open_flags);
622 if (ret >= 0) {
623 ret = drv->bdrv_open(bs, open_flags);
624 }
625 }
626
627 if (ret < 0) {
628 goto free_and_fail;
629 }
630
631 ret = refresh_total_sectors(bs, bs->total_sectors);
632 if (ret < 0) {
633 goto free_and_fail;
634 }
635
636 #ifndef _WIN32
637 if (bs->is_temporary) {
638 unlink(filename);
639 }
640 #endif
641 return 0;
642
643 free_and_fail:
644 if (bs->file) {
645 bdrv_delete(bs->file);
646 bs->file = NULL;
647 }
648 g_free(bs->opaque);
649 bs->opaque = NULL;
650 bs->drv = NULL;
651 return ret;
652 }
653
654 /*
655 * Opens a file using a protocol (file, host_device, nbd, ...)
656 */
657 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
658 {
659 BlockDriverState *bs;
660 BlockDriver *drv;
661 int ret;
662
663 drv = bdrv_find_protocol(filename);
664 if (!drv) {
665 return -ENOENT;
666 }
667
668 bs = bdrv_new("");
669 ret = bdrv_open_common(bs, filename, flags, drv);
670 if (ret < 0) {
671 bdrv_delete(bs);
672 return ret;
673 }
674 bs->growable = 1;
675 *pbs = bs;
676 return 0;
677 }
678
679 /*
680 * Opens a disk image (raw, qcow2, vmdk, ...)
681 */
682 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
683 BlockDriver *drv)
684 {
685 int ret;
686 char tmp_filename[PATH_MAX];
687
688 if (flags & BDRV_O_SNAPSHOT) {
689 BlockDriverState *bs1;
690 int64_t total_size;
691 int is_protocol = 0;
692 BlockDriver *bdrv_qcow2;
693 QEMUOptionParameter *options;
694 char backing_filename[PATH_MAX];
695
696 /* if snapshot, we create a temporary backing file and open it
697 instead of opening 'filename' directly */
698
699 /* if there is a backing file, use it */
700 bs1 = bdrv_new("");
701 ret = bdrv_open(bs1, filename, 0, drv);
702 if (ret < 0) {
703 bdrv_delete(bs1);
704 return ret;
705 }
706 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
707
708 if (bs1->drv && bs1->drv->protocol_name)
709 is_protocol = 1;
710
711 bdrv_delete(bs1);
712
713 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
714
715 /* Real path is meaningless for protocols */
716 if (is_protocol)
717 snprintf(backing_filename, sizeof(backing_filename),
718 "%s", filename);
719 else if (!realpath(filename, backing_filename))
720 return -errno;
721
722 bdrv_qcow2 = bdrv_find_format("qcow2");
723 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
724
725 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
726 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
727 if (drv) {
728 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
729 drv->format_name);
730 }
731
732 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
733 free_option_parameters(options);
734 if (ret < 0) {
735 return ret;
736 }
737
738 filename = tmp_filename;
739 drv = bdrv_qcow2;
740 bs->is_temporary = 1;
741 }
742
743 /* Find the right image format driver */
744 if (!drv) {
745 ret = find_image_format(filename, &drv);
746 }
747
748 if (!drv) {
749 goto unlink_and_fail;
750 }
751
752 /* Open the image */
753 ret = bdrv_open_common(bs, filename, flags, drv);
754 if (ret < 0) {
755 goto unlink_and_fail;
756 }
757
758 /* If there is a backing file, use it */
759 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
760 char backing_filename[PATH_MAX];
761 int back_flags;
762 BlockDriver *back_drv = NULL;
763
764 bs->backing_hd = bdrv_new("");
765
766 if (path_has_protocol(bs->backing_file)) {
767 pstrcpy(backing_filename, sizeof(backing_filename),
768 bs->backing_file);
769 } else {
770 path_combine(backing_filename, sizeof(backing_filename),
771 filename, bs->backing_file);
772 }
773
774 if (bs->backing_format[0] != '\0') {
775 back_drv = bdrv_find_format(bs->backing_format);
776 }
777
778 /* backing files always opened read-only */
779 back_flags =
780 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
781
782 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
783 if (ret < 0) {
784 bdrv_close(bs);
785 return ret;
786 }
787 if (bs->is_temporary) {
788 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
789 } else {
790 /* base image inherits from "parent" */
791 bs->backing_hd->keep_read_only = bs->keep_read_only;
792 }
793 }
794
795 if (!bdrv_key_required(bs)) {
796 bdrv_dev_change_media_cb(bs, true);
797 }
798
799 /* throttling disk I/O limits */
800 if (bs->io_limits_enabled) {
801 bdrv_io_limits_enable(bs);
802 }
803
804 return 0;
805
806 unlink_and_fail:
807 if (bs->is_temporary) {
808 unlink(filename);
809 }
810 return ret;
811 }
812
813 void bdrv_close(BlockDriverState *bs)
814 {
815 if (bs->drv) {
816 if (bs == bs_snapshots) {
817 bs_snapshots = NULL;
818 }
819 if (bs->backing_hd) {
820 bdrv_delete(bs->backing_hd);
821 bs->backing_hd = NULL;
822 }
823 bs->drv->bdrv_close(bs);
824 g_free(bs->opaque);
825 #ifdef _WIN32
826 if (bs->is_temporary) {
827 unlink(bs->filename);
828 }
829 #endif
830 bs->opaque = NULL;
831 bs->drv = NULL;
832 bs->copy_on_read = 0;
833
834 if (bs->file != NULL) {
835 bdrv_close(bs->file);
836 }
837
838 bdrv_dev_change_media_cb(bs, false);
839 }
840
841     /* throttling disk I/O limits */
842 if (bs->io_limits_enabled) {
843 bdrv_io_limits_disable(bs);
844 }
845 }
846
847 void bdrv_close_all(void)
848 {
849 BlockDriverState *bs;
850
851 QTAILQ_FOREACH(bs, &bdrv_states, list) {
852 bdrv_close(bs);
853 }
854 }
855
856 /*
857 * Wait for pending requests to complete across all BlockDriverStates
858 *
859 * This function does not flush data to disk, use bdrv_flush_all() for that
860 * after calling this function.
861 */
862 void bdrv_drain_all(void)
863 {
864 BlockDriverState *bs;
865
866 qemu_aio_flush();
867
868 /* If requests are still pending there is a bug somewhere */
869 QTAILQ_FOREACH(bs, &bdrv_states, list) {
870 assert(QLIST_EMPTY(&bs->tracked_requests));
871 assert(qemu_co_queue_empty(&bs->throttled_reqs));
872 }
873 }
874
875 /* make a BlockDriverState anonymous by removing it from the bdrv_states
876    list. Also, empty the device_name so a second removal is a no-op */
877 void bdrv_make_anon(BlockDriverState *bs)
878 {
879 if (bs->device_name[0] != '\0') {
880 QTAILQ_REMOVE(&bdrv_states, bs, list);
881 }
882 bs->device_name[0] = '\0';
883 }
884
885 void bdrv_delete(BlockDriverState *bs)
886 {
887 assert(!bs->dev);
888
889 /* remove from list, if necessary */
890 bdrv_make_anon(bs);
891
892 bdrv_close(bs);
893 if (bs->file != NULL) {
894 bdrv_delete(bs->file);
895 }
896
897 assert(bs != bs_snapshots);
898 g_free(bs);
899 }
900
901 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
902 /* TODO change to DeviceState *dev when all users are qdevified */
903 {
904 if (bs->dev) {
905 return -EBUSY;
906 }
907 bs->dev = dev;
908 bdrv_iostatus_reset(bs);
909 return 0;
910 }
911
912 /* TODO qdevified devices don't use this, remove when devices are qdevified */
913 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
914 {
915 if (bdrv_attach_dev(bs, dev) < 0) {
916 abort();
917 }
918 }
919
920 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
921 /* TODO change to DeviceState *dev when all users are qdevified */
922 {
923 assert(bs->dev == dev);
924 bs->dev = NULL;
925 bs->dev_ops = NULL;
926 bs->dev_opaque = NULL;
927 bs->buffer_alignment = 512;
928 }
929
930 /* TODO change to return DeviceState * when all users are qdevified */
931 void *bdrv_get_attached_dev(BlockDriverState *bs)
932 {
933 return bs->dev;
934 }
935
936 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
937 void *opaque)
938 {
939 bs->dev_ops = ops;
940 bs->dev_opaque = opaque;
941 if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
942 bs_snapshots = NULL;
943 }
944 }
945
946 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
947 BlockQMPEventAction action, int is_read)
948 {
949 QObject *data;
950 const char *action_str;
951
952 switch (action) {
953 case BDRV_ACTION_REPORT:
954 action_str = "report";
955 break;
956 case BDRV_ACTION_IGNORE:
957 action_str = "ignore";
958 break;
959 case BDRV_ACTION_STOP:
960 action_str = "stop";
961 break;
962 default:
963 abort();
964 }
965
966 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
967 bdrv->device_name,
968 action_str,
969 is_read ? "read" : "write");
970 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
971
972 qobject_decref(data);
973 }
974
975 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
976 {
977 QObject *data;
978
979 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
980 bdrv_get_device_name(bs), ejected);
981 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
982
983 qobject_decref(data);
984 }
985
986 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
987 {
988 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
989 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
990 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
991 if (tray_was_closed) {
992 /* tray open */
993 bdrv_emit_qmp_eject_event(bs, true);
994 }
995 if (load) {
996 /* tray close */
997 bdrv_emit_qmp_eject_event(bs, false);
998 }
999 }
1000 }
1001
1002 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
1003 {
1004 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
1005 }
1006
1007 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
1008 {
1009 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
1010 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
1011 }
1012 }
1013
1014 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
1015 {
1016 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
1017 return bs->dev_ops->is_tray_open(bs->dev_opaque);
1018 }
1019 return false;
1020 }
1021
1022 static void bdrv_dev_resize_cb(BlockDriverState *bs)
1023 {
1024 if (bs->dev_ops && bs->dev_ops->resize_cb) {
1025 bs->dev_ops->resize_cb(bs->dev_opaque);
1026 }
1027 }
1028
1029 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
1030 {
1031 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
1032 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
1033 }
1034 return false;
1035 }
1036
1037 /*
1038 * Run consistency checks on an image
1039 *
1040 * Returns 0 if the check could be completed (it doesn't mean that the image is
1041 * free of errors) or -errno when an internal error occurred. The results of the
1042 * check are stored in res.
1043 */
1044 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
1045 {
1046 if (bs->drv->bdrv_check == NULL) {
1047 return -ENOTSUP;
1048 }
1049
1050 memset(res, 0, sizeof(*res));
1051 return bs->drv->bdrv_check(bs, res);
1052 }
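
/*
 * A usage sketch for bdrv_check(). The BdrvCheckResult field names below
 * (corruptions, leaks) are assumed for illustration; note that a return
 * value of 0 only means the check ran, not that the image is clean.
 */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res);

    if (ret < 0) {
        return ret; /* -ENOTSUP if the driver cannot check, or -errno */
    }
    if (res.corruptions || res.leaks) {
        /* the check completed but found problems in the image */
    }
    return 0;
}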
1053
1054 #define COMMIT_BUF_SECTORS 2048
1055
1056 /* commit COW file into the raw image */
1057 int bdrv_commit(BlockDriverState *bs)
1058 {
1059 BlockDriver *drv = bs->drv;
1060 BlockDriver *backing_drv;
1061 int64_t sector, total_sectors;
1062 int n, ro, open_flags;
1063 int ret = 0, rw_ret = 0;
1064 uint8_t *buf;
1065 char filename[1024];
1066 BlockDriverState *bs_rw, *bs_ro;
1067
1068 if (!drv)
1069 return -ENOMEDIUM;
1070
1071 if (!bs->backing_hd) {
1072 return -ENOTSUP;
1073 }
1074
1075 if (bs->backing_hd->keep_read_only) {
1076 return -EACCES;
1077 }
1078
1079 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
1080 return -EBUSY;
1081 }
1082
1083 backing_drv = bs->backing_hd->drv;
1084 ro = bs->backing_hd->read_only;
1085 strncpy(filename, bs->backing_hd->filename, sizeof(filename));
1086 open_flags = bs->backing_hd->open_flags;
1087
1088 if (ro) {
1089 /* re-open as RW */
1090 bdrv_delete(bs->backing_hd);
1091 bs->backing_hd = NULL;
1092 bs_rw = bdrv_new("");
1093 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
1094 backing_drv);
1095 if (rw_ret < 0) {
1096 bdrv_delete(bs_rw);
1097 /* try to re-open read-only */
1098 bs_ro = bdrv_new("");
1099 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1100 backing_drv);
1101 if (ret < 0) {
1102 bdrv_delete(bs_ro);
1103 /* drive not functional anymore */
1104 bs->drv = NULL;
1105 return ret;
1106 }
1107 bs->backing_hd = bs_ro;
1108 return rw_ret;
1109 }
1110 bs->backing_hd = bs_rw;
1111 }
1112
1113 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
1114 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
1115
1116 for (sector = 0; sector < total_sectors; sector += n) {
1117 if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
1118
1119 if (bdrv_read(bs, sector, buf, n) != 0) {
1120 ret = -EIO;
1121 goto ro_cleanup;
1122 }
1123
1124 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
1125 ret = -EIO;
1126 goto ro_cleanup;
1127 }
1128 }
1129 }
1130
1131 if (drv->bdrv_make_empty) {
1132 ret = drv->bdrv_make_empty(bs);
1133 bdrv_flush(bs);
1134 }
1135
1136 /*
1137 * Make sure all data we wrote to the backing device is actually
1138 * stable on disk.
1139 */
1140 if (bs->backing_hd)
1141 bdrv_flush(bs->backing_hd);
1142
1143 ro_cleanup:
1144 g_free(buf);
1145
1146 if (ro) {
1147 /* re-open as RO */
1148 bdrv_delete(bs->backing_hd);
1149 bs->backing_hd = NULL;
1150 bs_ro = bdrv_new("");
1151 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
1152 backing_drv);
1153 if (ret < 0) {
1154 bdrv_delete(bs_ro);
1155 /* drive not functional anymore */
1156 bs->drv = NULL;
1157 return ret;
1158 }
1159 bs->backing_hd = bs_ro;
1160 bs->backing_hd->keep_read_only = 0;
1161 }
1162
1163 return ret;
1164 }
1165
1166 void bdrv_commit_all(void)
1167 {
1168 BlockDriverState *bs;
1169
1170 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1171 bdrv_commit(bs);
1172 }
1173 }
1174
1175 struct BdrvTrackedRequest {
1176 BlockDriverState *bs;
1177 int64_t sector_num;
1178 int nb_sectors;
1179 bool is_write;
1180 QLIST_ENTRY(BdrvTrackedRequest) list;
1181 Coroutine *co; /* owner, used for deadlock detection */
1182 CoQueue wait_queue; /* coroutines blocked on this request */
1183 };
1184
1185 /**
1186 * Remove an active request from the tracked requests list
1187 *
1188 * This function should be called when a tracked request is completing.
1189 */
1190 static void tracked_request_end(BdrvTrackedRequest *req)
1191 {
1192 QLIST_REMOVE(req, list);
1193 qemu_co_queue_restart_all(&req->wait_queue);
1194 }
1195
1196 /**
1197 * Add an active request to the tracked requests list
1198 */
1199 static void tracked_request_begin(BdrvTrackedRequest *req,
1200 BlockDriverState *bs,
1201 int64_t sector_num,
1202 int nb_sectors, bool is_write)
1203 {
1204 *req = (BdrvTrackedRequest){
1205 .bs = bs,
1206 .sector_num = sector_num,
1207 .nb_sectors = nb_sectors,
1208 .is_write = is_write,
1209 .co = qemu_coroutine_self(),
1210 };
1211
1212 qemu_co_queue_init(&req->wait_queue);
1213
1214 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
1215 }
1216
1217 /**
1218 * Round a region to cluster boundaries
1219 */
1220 static void round_to_clusters(BlockDriverState *bs,
1221 int64_t sector_num, int nb_sectors,
1222 int64_t *cluster_sector_num,
1223 int *cluster_nb_sectors)
1224 {
1225 BlockDriverInfo bdi;
1226
1227 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
1228 *cluster_sector_num = sector_num;
1229 *cluster_nb_sectors = nb_sectors;
1230 } else {
1231 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
1232 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
1233 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
1234 nb_sectors, c);
1235 }
1236 }
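
/*
 * A worked example for round_to_clusters(), assuming bdi.cluster_size is
 * 64 KB, i.e. c = 65536 / 512 = 128 sectors: a request for sectors
 * [130, 134) is widened to [128, 256), because QEMU_ALIGN_DOWN(130, 128)
 * is 128 and QEMU_ALIGN_UP(130 - 128 + 4, 128) is 128. The rounded region
 * covers every cluster the request touches.
 */
static void example_round_to_clusters(BlockDriverState *bs)
{
    int64_t cluster_sector_num;
    int cluster_nb_sectors;

    round_to_clusters(bs, 130, 4, &cluster_sector_num, &cluster_nb_sectors);
    /* with a 64 KB cluster size: cluster_sector_num == 128,
     * cluster_nb_sectors == 128 */
}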
1237
1238 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
1239 int64_t sector_num, int nb_sectors) {
1240 /* aaaa bbbb */
1241 if (sector_num >= req->sector_num + req->nb_sectors) {
1242 return false;
1243 }
1244 /* bbbb aaaa */
1245 if (req->sector_num >= sector_num + nb_sectors) {
1246 return false;
1247 }
1248 return true;
1249 }
1250
1251 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
1252 int64_t sector_num, int nb_sectors)
1253 {
1254 BdrvTrackedRequest *req;
1255 int64_t cluster_sector_num;
1256 int cluster_nb_sectors;
1257 bool retry;
1258
1259 /* If we touch the same cluster it counts as an overlap. This guarantees
1260 * that allocating writes will be serialized and not race with each other
1261 * for the same cluster. For example, in copy-on-read it ensures that the
1262 * CoR read and write operations are atomic and guest writes cannot
1263 * interleave between them.
1264 */
1265 round_to_clusters(bs, sector_num, nb_sectors,
1266 &cluster_sector_num, &cluster_nb_sectors);
1267
1268 do {
1269 retry = false;
1270 QLIST_FOREACH(req, &bs->tracked_requests, list) {
1271 if (tracked_request_overlaps(req, cluster_sector_num,
1272 cluster_nb_sectors)) {
1273 /* Hitting this means there was a reentrant request, for
1274 * example, a block driver issuing nested requests. This must
1275 * never happen since it means deadlock.
1276 */
1277 assert(qemu_coroutine_self() != req->co);
1278
1279 qemu_co_queue_wait(&req->wait_queue);
1280 retry = true;
1281 break;
1282 }
1283 }
1284 } while (retry);
1285 }
1286
1287 /*
1288 * Return values:
1289 * 0 - success
1290 * -EINVAL - backing format specified, but no file
1291 * -ENOSPC - can't update the backing file because no space is left in the
1292 * image file header
1293 * -ENOTSUP - format driver doesn't support changing the backing file
1294 */
1295 int bdrv_change_backing_file(BlockDriverState *bs,
1296 const char *backing_file, const char *backing_fmt)
1297 {
1298 BlockDriver *drv = bs->drv;
1299
1300 if (drv->bdrv_change_backing_file != NULL) {
1301 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
1302 } else {
1303 return -ENOTSUP;
1304 }
1305 }
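
/*
 * A sketch of calling bdrv_change_backing_file() and mapping the return
 * codes documented above; the file name and format are placeholders.
 */
static int example_rebase(BlockDriverState *bs)
{
    int ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");

    switch (ret) {
    case 0:        /* image header updated */
        break;
    case -ENOTSUP: /* format driver cannot rewrite the backing file */
    case -ENOSPC:  /* no room left in the image file header */
    case -EINVAL:  /* backing format given without a backing file */
    default:
        break;
    }
    return ret;
}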
1306
1307 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
1308 size_t size)
1309 {
1310 int64_t len;
1311
1312 if (!bdrv_is_inserted(bs))
1313 return -ENOMEDIUM;
1314
1315 if (bs->growable)
1316 return 0;
1317
1318 len = bdrv_getlength(bs);
1319
1320 if (offset < 0)
1321 return -EIO;
1322
1323 if ((offset > len) || (len - offset < size))
1324 return -EIO;
1325
1326 return 0;
1327 }
1328
1329 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
1330 int nb_sectors)
1331 {
1332 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
1333 nb_sectors * BDRV_SECTOR_SIZE);
1334 }
1335
1336 typedef struct RwCo {
1337 BlockDriverState *bs;
1338 int64_t sector_num;
1339 int nb_sectors;
1340 QEMUIOVector *qiov;
1341 bool is_write;
1342 int ret;
1343 } RwCo;
1344
1345 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
1346 {
1347 RwCo *rwco = opaque;
1348
1349 if (!rwco->is_write) {
1350 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
1351 rwco->nb_sectors, rwco->qiov, 0);
1352 } else {
1353 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
1354 rwco->nb_sectors, rwco->qiov, 0);
1355 }
1356 }
1357
1358 /*
1359 * Process a synchronous request using coroutines
1360 */
1361 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
1362 int nb_sectors, bool is_write)
1363 {
1364 QEMUIOVector qiov;
1365 struct iovec iov = {
1366 .iov_base = (void *)buf,
1367 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1368 };
1369 Coroutine *co;
1370 RwCo rwco = {
1371 .bs = bs,
1372 .sector_num = sector_num,
1373 .nb_sectors = nb_sectors,
1374 .qiov = &qiov,
1375 .is_write = is_write,
1376 .ret = NOT_DONE,
1377 };
1378
1379 qemu_iovec_init_external(&qiov, &iov, 1);
1380
1381 if (qemu_in_coroutine()) {
1382 /* Fast-path if already in coroutine context */
1383 bdrv_rw_co_entry(&rwco);
1384 } else {
1385 co = qemu_coroutine_create(bdrv_rw_co_entry);
1386 qemu_coroutine_enter(co, &rwco);
1387 while (rwco.ret == NOT_DONE) {
1388 qemu_aio_wait();
1389 }
1390 }
1391 return rwco.ret;
1392 }
1393
1394 /* return < 0 if error. See bdrv_write() for the return codes */
1395 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
1396 uint8_t *buf, int nb_sectors)
1397 {
1398 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
1399 }
1400
1401 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
1402 int nb_sectors, int dirty)
1403 {
1404 int64_t start, end;
1405 unsigned long val, idx, bit;
1406
1407 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
1408 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
1409
1410 for (; start <= end; start++) {
1411 idx = start / (sizeof(unsigned long) * 8);
1412 bit = start % (sizeof(unsigned long) * 8);
1413 val = bs->dirty_bitmap[idx];
1414 if (dirty) {
1415 if (!(val & (1UL << bit))) {
1416 bs->dirty_count++;
1417 val |= 1UL << bit;
1418 }
1419 } else {
1420 if (val & (1UL << bit)) {
1421 bs->dirty_count--;
1422 val &= ~(1UL << bit);
1423 }
1424 }
1425 bs->dirty_bitmap[idx] = val;
1426 }
1427 }
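
/*
 * A worked example of the bitmap indexing above, assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK is 2048 and 64-bit longs: marking sectors
 * [4096, 4160) dirty gives start = end = 4096 / 2048 = 2, so only chunk 2
 * is touched; idx = 2 / 64 = 0 and bit = 2 % 64 = 2, so bit 2 of
 * dirty_bitmap[0] is set and dirty_count increases by one.
 */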
1428
1429 /* Return < 0 if error. Important errors are:
1430 -EIO generic I/O error (may happen for all errors)
1431 -ENOMEDIUM No media inserted.
1432 -EINVAL Invalid sector number or nb_sectors
1433    -EACCES      Attempt to write to a read-only device
1434 */
1435 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
1436 const uint8_t *buf, int nb_sectors)
1437 {
1438 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
1439 }
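
/*
 * A minimal synchronous I/O sketch built on bdrv_read()/bdrv_write(); the
 * buffer holds one 512-byte sector and sector 0 is chosen arbitrarily.
 */
static int example_rw_sector(BlockDriverState *bs)
{
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret;

    ret = bdrv_read(bs, 0, buf, 1);   /* read sector 0 */
    if (ret < 0) {
        return ret;                   /* e.g. -ENOMEDIUM or -EIO */
    }
    return bdrv_write(bs, 0, buf, 1); /* write it back unchanged */
}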
1440
1441 int bdrv_pread(BlockDriverState *bs, int64_t offset,
1442 void *buf, int count1)
1443 {
1444 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1445 int len, nb_sectors, count;
1446 int64_t sector_num;
1447 int ret;
1448
1449 count = count1;
1450 /* first read to align to sector start */
1451 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1452 if (len > count)
1453 len = count;
1454 sector_num = offset >> BDRV_SECTOR_BITS;
1455 if (len > 0) {
1456 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1457 return ret;
1458 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1459 count -= len;
1460 if (count == 0)
1461 return count1;
1462 sector_num++;
1463 buf += len;
1464 }
1465
1466 /* read the sectors "in place" */
1467 nb_sectors = count >> BDRV_SECTOR_BITS;
1468 if (nb_sectors > 0) {
1469 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1470 return ret;
1471 sector_num += nb_sectors;
1472 len = nb_sectors << BDRV_SECTOR_BITS;
1473 buf += len;
1474 count -= len;
1475 }
1476
1477 /* add data from the last sector */
1478 if (count > 0) {
1479 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1480 return ret;
1481 memcpy(buf, tmp_buf, count);
1482 }
1483 return count1;
1484 }
1485
1486 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1487 const void *buf, int count1)
1488 {
1489 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1490 int len, nb_sectors, count;
1491 int64_t sector_num;
1492 int ret;
1493
1494 count = count1;
1495 /* first write to align to sector start */
1496 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1497 if (len > count)
1498 len = count;
1499 sector_num = offset >> BDRV_SECTOR_BITS;
1500 if (len > 0) {
1501 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1502 return ret;
1503 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1504 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1505 return ret;
1506 count -= len;
1507 if (count == 0)
1508 return count1;
1509 sector_num++;
1510 buf += len;
1511 }
1512
1513 /* write the sectors "in place" */
1514 nb_sectors = count >> BDRV_SECTOR_BITS;
1515 if (nb_sectors > 0) {
1516 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1517 return ret;
1518 sector_num += nb_sectors;
1519 len = nb_sectors << BDRV_SECTOR_BITS;
1520 buf += len;
1521 count -= len;
1522 }
1523
1524 /* add data from the last sector */
1525 if (count > 0) {
1526 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1527 return ret;
1528 memcpy(tmp_buf, buf, count);
1529 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1530 return ret;
1531 }
1532 return count1;
1533 }
1534
1535 /*
1536 * Writes to the file and ensures that no writes are reordered across this
1537 * request (acts as a barrier)
1538 *
1539 * Returns 0 on success, -errno in error cases.
1540 */
1541 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1542 const void *buf, int count)
1543 {
1544 int ret;
1545
1546 ret = bdrv_pwrite(bs, offset, buf, count);
1547 if (ret < 0) {
1548 return ret;
1549 }
1550
1551 /* No flush needed for cache modes that use O_DSYNC */
1552 if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
1553 bdrv_flush(bs);
1554 }
1555
1556 return 0;
1557 }
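
/*
 * A sketch of the typical bdrv_pwrite_sync() use case: updating a small
 * on-disk metadata structure whose write must not be reordered with later
 * writes. The offset and size are illustrative.
 */
static int example_update_header(BlockDriverState *bs, const void *header,
                                 int header_size)
{
    /* returns 0 on success; the explicit flush is skipped for cache modes
     * that already use O_DSYNC (no BDRV_O_CACHE_WB) */
    return bdrv_pwrite_sync(bs, 0, header, header_size);
}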
1558
1559 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
1560 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1561 {
1562 /* Perform I/O through a temporary buffer so that users who scribble over
1563 * their read buffer while the operation is in progress do not end up
1564 * modifying the image file. This is critical for zero-copy guest I/O
1565 * where anything might happen inside guest memory.
1566 */
1567 void *bounce_buffer;
1568
1569 BlockDriver *drv = bs->drv;
1570 struct iovec iov;
1571 QEMUIOVector bounce_qiov;
1572 int64_t cluster_sector_num;
1573 int cluster_nb_sectors;
1574 size_t skip_bytes;
1575 int ret;
1576
1577     /* Cover the entire cluster so no additional backing file I/O is required
1578      * when allocating a cluster in the image file.
1579 */
1580 round_to_clusters(bs, sector_num, nb_sectors,
1581 &cluster_sector_num, &cluster_nb_sectors);
1582
1583 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
1584 cluster_sector_num, cluster_nb_sectors);
1585
1586 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
1587 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
1588 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
1589
1590 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
1591 &bounce_qiov);
1592 if (ret < 0) {
1593 goto err;
1594 }
1595
1596 if (drv->bdrv_co_write_zeroes &&
1597 buffer_is_zero(bounce_buffer, iov.iov_len)) {
1598 ret = drv->bdrv_co_write_zeroes(bs, cluster_sector_num,
1599 cluster_nb_sectors);
1600 } else {
1601 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
1602 &bounce_qiov);
1603 }
1604
1605 if (ret < 0) {
1606 /* It might be okay to ignore write errors for guest requests. If this
1607 * is a deliberate copy-on-read then we don't want to ignore the error.
1608 * Simply report it in all cases.
1609 */
1610 goto err;
1611 }
1612
1613 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
1614 qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
1615 nb_sectors * BDRV_SECTOR_SIZE);
1616
1617 err:
1618 qemu_vfree(bounce_buffer);
1619 return ret;
1620 }
1621
1622 /*
1623 * Handle a read request in coroutine context
1624 */
1625 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1626 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1627 BdrvRequestFlags flags)
1628 {
1629 BlockDriver *drv = bs->drv;
1630 BdrvTrackedRequest req;
1631 int ret;
1632
1633 if (!drv) {
1634 return -ENOMEDIUM;
1635 }
1636 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1637 return -EIO;
1638 }
1639
1640 /* throttling disk read I/O */
1641 if (bs->io_limits_enabled) {
1642 bdrv_io_limits_intercept(bs, false, nb_sectors);
1643 }
1644
1645 if (bs->copy_on_read) {
1646 flags |= BDRV_REQ_COPY_ON_READ;
1647 }
1648 if (flags & BDRV_REQ_COPY_ON_READ) {
1649 bs->copy_on_read_in_flight++;
1650 }
1651
1652 if (bs->copy_on_read_in_flight) {
1653 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1654 }
1655
1656 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
1657
1658 if (flags & BDRV_REQ_COPY_ON_READ) {
1659 int pnum;
1660
1661 ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
1662 if (ret < 0) {
1663 goto out;
1664 }
1665
1666 if (!ret || pnum != nb_sectors) {
1667 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
1668 goto out;
1669 }
1670 }
1671
1672 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1673
1674 out:
1675 tracked_request_end(&req);
1676
1677 if (flags & BDRV_REQ_COPY_ON_READ) {
1678 bs->copy_on_read_in_flight--;
1679 }
1680
1681 return ret;
1682 }
1683
1684 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1685 int nb_sectors, QEMUIOVector *qiov)
1686 {
1687 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1688
1689 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1690 }
1691
1692 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1693 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1694 {
1695 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1696
1697 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1698 BDRV_REQ_COPY_ON_READ);
1699 }
1700
1701 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1702 int64_t sector_num, int nb_sectors)
1703 {
1704 BlockDriver *drv = bs->drv;
1705 QEMUIOVector qiov;
1706 struct iovec iov;
1707 int ret;
1708
1709 /* First try the efficient write zeroes operation */
1710 if (drv->bdrv_co_write_zeroes) {
1711 return drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
1712 }
1713
1714 /* Fall back to bounce buffer if write zeroes is unsupported */
1715 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
1716 iov.iov_base = qemu_blockalign(bs, iov.iov_len);
1717 memset(iov.iov_base, 0, iov.iov_len);
1718 qemu_iovec_init_external(&qiov, &iov, 1);
1719
1720 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
1721
1722 qemu_vfree(iov.iov_base);
1723 return ret;
1724 }
1725
1726 /*
1727 * Handle a write request in coroutine context
1728 */
1729 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1730 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1731 BdrvRequestFlags flags)
1732 {
1733 BlockDriver *drv = bs->drv;
1734 BdrvTrackedRequest req;
1735 int ret;
1736
1737 if (!bs->drv) {
1738 return -ENOMEDIUM;
1739 }
1740 if (bs->read_only) {
1741 return -EACCES;
1742 }
1743 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1744 return -EIO;
1745 }
1746
1747 /* throttling disk write I/O */
1748 if (bs->io_limits_enabled) {
1749 bdrv_io_limits_intercept(bs, true, nb_sectors);
1750 }
1751
1752 if (bs->copy_on_read_in_flight) {
1753 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1754 }
1755
1756 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
1757
1758 if (flags & BDRV_REQ_ZERO_WRITE) {
1759 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
1760 } else {
1761 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1762 }
1763
1764 if (bs->dirty_bitmap) {
1765 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1766 }
1767
1768 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
1769 bs->wr_highest_sector = sector_num + nb_sectors - 1;
1770 }
1771
1772 tracked_request_end(&req);
1773
1774 return ret;
1775 }
1776
1777 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1778 int nb_sectors, QEMUIOVector *qiov)
1779 {
1780 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1781
1782 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1783 }
1784
1785 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1786 int64_t sector_num, int nb_sectors)
1787 {
1788 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
1789
1790 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1791 BDRV_REQ_ZERO_WRITE);
1792 }
1793
1794 /**
1795 * Truncate file to 'offset' bytes (needed only for file protocols)
1796 */
1797 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1798 {
1799 BlockDriver *drv = bs->drv;
1800 int ret;
1801 if (!drv)
1802 return -ENOMEDIUM;
1803 if (!drv->bdrv_truncate)
1804 return -ENOTSUP;
1805 if (bs->read_only)
1806 return -EACCES;
1807 if (bdrv_in_use(bs))
1808 return -EBUSY;
1809 ret = drv->bdrv_truncate(bs, offset);
1810 if (ret == 0) {
1811 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1812 bdrv_dev_resize_cb(bs);
1813 }
1814 return ret;
1815 }
1816
1817 /**
1818  * Length of an allocated file in bytes. Sparse files are counted by their
1819  * actual allocated space. Return < 0 if error or unknown.
1820 */
1821 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
1822 {
1823 BlockDriver *drv = bs->drv;
1824 if (!drv) {
1825 return -ENOMEDIUM;
1826 }
1827 if (drv->bdrv_get_allocated_file_size) {
1828 return drv->bdrv_get_allocated_file_size(bs);
1829 }
1830 if (bs->file) {
1831 return bdrv_get_allocated_file_size(bs->file);
1832 }
1833 return -ENOTSUP;
1834 }
1835
1836 /**
1837 * Length of a file in bytes. Return < 0 if error or unknown.
1838 */
1839 int64_t bdrv_getlength(BlockDriverState *bs)
1840 {
1841 BlockDriver *drv = bs->drv;
1842 if (!drv)
1843 return -ENOMEDIUM;
1844
1845 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
1846 if (drv->bdrv_getlength) {
1847 return drv->bdrv_getlength(bs);
1848 }
1849 }
1850 return bs->total_sectors * BDRV_SECTOR_SIZE;
1851 }
1852
1853 /* return 0 as number of sectors if no device present or error */
1854 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1855 {
1856 int64_t length;
1857 length = bdrv_getlength(bs);
1858 if (length < 0)
1859 length = 0;
1860 else
1861 length = length >> BDRV_SECTOR_BITS;
1862 *nb_sectors_ptr = length;
1863 }
1864
1865 struct partition {
1866 uint8_t boot_ind; /* 0x80 - active */
1867 uint8_t head; /* starting head */
1868 uint8_t sector; /* starting sector */
1869 uint8_t cyl; /* starting cylinder */
1870 uint8_t sys_ind; /* What partition type */
1871 uint8_t end_head; /* end head */
1872 uint8_t end_sector; /* end sector */
1873 uint8_t end_cyl; /* end cylinder */
1874 uint32_t start_sect; /* starting sector counting from 0 */
1875 uint32_t nr_sects; /* nr of sectors in partition */
1876 } QEMU_PACKED;
1877
1878 /* Try to guess the disk's logical geometry from the MS-DOS partition table. Return 0 if OK, -1 if it could not be guessed */
1879 static int guess_disk_lchs(BlockDriverState *bs,
1880 int *pcylinders, int *pheads, int *psectors)
1881 {
1882 uint8_t buf[BDRV_SECTOR_SIZE];
1883 int ret, i, heads, sectors, cylinders;
1884 struct partition *p;
1885 uint32_t nr_sects;
1886 uint64_t nb_sectors;
1887
1888 bdrv_get_geometry(bs, &nb_sectors);
1889
1890 ret = bdrv_read(bs, 0, buf, 1);
1891 if (ret < 0)
1892 return -1;
1893 /* test msdos magic */
1894 if (buf[510] != 0x55 || buf[511] != 0xaa)
1895 return -1;
1896 for(i = 0; i < 4; i++) {
1897 p = ((struct partition *)(buf + 0x1be)) + i;
1898 nr_sects = le32_to_cpu(p->nr_sects);
1899 if (nr_sects && p->end_head) {
1900 /* We make the assumption that the partition terminates on
1901 a cylinder boundary */
1902 heads = p->end_head + 1;
1903 sectors = p->end_sector & 63;
1904 if (sectors == 0)
1905 continue;
1906 cylinders = nb_sectors / (heads * sectors);
1907 if (cylinders < 1 || cylinders > 16383)
1908 continue;
1909 *pheads = heads;
1910 *psectors = sectors;
1911 *pcylinders = cylinders;
1912 #if 0
1913 printf("guessed geometry: LCHS=%d %d %d\n",
1914 cylinders, heads, sectors);
1915 #endif
1916 return 0;
1917 }
1918 }
1919 return -1;
1920 }
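
/*
 * A worked example for guess_disk_lchs(), with made-up partition values:
 * end_head = 15 and end_sector = 63 give heads = 16 and sectors = 63, so a
 * disk of 2097152 sectors (1 GB) yields cylinders = 2097152 / (16 * 63) =
 * 2080, which falls inside the accepted range [1, 16383].
 */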
1921
1922 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1923 {
1924 int translation, lba_detected = 0;
1925 int cylinders, heads, secs;
1926 uint64_t nb_sectors;
1927
1928 /* if a geometry hint is available, use it */
1929 bdrv_get_geometry(bs, &nb_sectors);
1930 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1931 translation = bdrv_get_translation_hint(bs);
1932 if (cylinders != 0) {
1933 *pcyls = cylinders;
1934 *pheads = heads;
1935 *psecs = secs;
1936 } else {
1937 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1938 if (heads > 16) {
1939 /* if heads > 16, it means that a BIOS LBA
1940 translation was active, so the default
1941 hardware geometry is OK */
1942 lba_detected = 1;
1943 goto default_geometry;
1944 } else {
1945 *pcyls = cylinders;
1946 *pheads = heads;
1947 *psecs = secs;
1948 /* disable any translation to be in sync with
1949 the logical geometry */
1950 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1951 bdrv_set_translation_hint(bs,
1952 BIOS_ATA_TRANSLATION_NONE);
1953 }
1954 }
1955 } else {
1956 default_geometry:
1957 /* if no geometry, use a standard physical disk geometry */
1958 cylinders = nb_sectors / (16 * 63);
1959
1960 if (cylinders > 16383)
1961 cylinders = 16383;
1962 else if (cylinders < 2)
1963 cylinders = 2;
1964 *pcyls = cylinders;
1965 *pheads = 16;
1966 *psecs = 63;
1967 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1968 if ((*pcyls * *pheads) <= 131072) {
1969 bdrv_set_translation_hint(bs,
1970 BIOS_ATA_TRANSLATION_LARGE);
1971 } else {
1972 bdrv_set_translation_hint(bs,
1973 BIOS_ATA_TRANSLATION_LBA);
1974 }
1975 }
1976 }
1977 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1978 }
1979 }
1980
1981 void bdrv_set_geometry_hint(BlockDriverState *bs,
1982 int cyls, int heads, int secs)
1983 {
1984 bs->cyls = cyls;
1985 bs->heads = heads;
1986 bs->secs = secs;
1987 }
1988
1989 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1990 {
1991 bs->translation = translation;
1992 }
1993
1994 void bdrv_get_geometry_hint(BlockDriverState *bs,
1995 int *pcyls, int *pheads, int *psecs)
1996 {
1997 *pcyls = bs->cyls;
1998 *pheads = bs->heads;
1999 *psecs = bs->secs;
2000 }
2001
2002 /* throttling disk I/O limits */
2003 void bdrv_set_io_limits(BlockDriverState *bs,
2004 BlockIOLimit *io_limits)
2005 {
2006 bs->io_limits = *io_limits;
2007 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
2008 }
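
/*
 * A sketch of configuring throttling via bdrv_set_io_limits(); the limit
 * values are arbitrary examples (bytes per second and ops per second).
 */
static void example_set_io_limits(BlockDriverState *bs)
{
    BlockIOLimit io_limits;

    memset(&io_limits, 0, sizeof(io_limits));
    io_limits.bps[BLOCK_IO_LIMIT_TOTAL]  = 10 * 1024 * 1024; /* 10 MB/s */
    io_limits.iops[BLOCK_IO_LIMIT_TOTAL] = 100;              /* 100 ops/s */

    /* io_limits_enabled becomes true because at least one limit is set */
    bdrv_set_io_limits(bs, &io_limits);
}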
2009
2010 /* Recognize floppy formats */
2011 typedef struct FDFormat {
2012 FDriveType drive;
2013 uint8_t last_sect;
2014 uint8_t max_track;
2015 uint8_t max_head;
2016 } FDFormat;
2017
2018 static const FDFormat fd_formats[] = {
2019 /* First entry is default format */
2020 /* 1.44 MB 3"1/2 floppy disks */
2021 { FDRIVE_DRV_144, 18, 80, 1, },
2022 { FDRIVE_DRV_144, 20, 80, 1, },
2023 { FDRIVE_DRV_144, 21, 80, 1, },
2024 { FDRIVE_DRV_144, 21, 82, 1, },
2025 { FDRIVE_DRV_144, 21, 83, 1, },
2026 { FDRIVE_DRV_144, 22, 80, 1, },
2027 { FDRIVE_DRV_144, 23, 80, 1, },
2028 { FDRIVE_DRV_144, 24, 80, 1, },
2029 /* 2.88 MB 3"1/2 floppy disks */
2030 { FDRIVE_DRV_288, 36, 80, 1, },
2031 { FDRIVE_DRV_288, 39, 80, 1, },
2032 { FDRIVE_DRV_288, 40, 80, 1, },
2033 { FDRIVE_DRV_288, 44, 80, 1, },
2034 { FDRIVE_DRV_288, 48, 80, 1, },
2035 /* 720 kB 3"1/2 floppy disks */
2036 { FDRIVE_DRV_144, 9, 80, 1, },
2037 { FDRIVE_DRV_144, 10, 80, 1, },
2038 { FDRIVE_DRV_144, 10, 82, 1, },
2039 { FDRIVE_DRV_144, 10, 83, 1, },
2040 { FDRIVE_DRV_144, 13, 80, 1, },
2041 { FDRIVE_DRV_144, 14, 80, 1, },
2042 /* 1.2 MB 5"1/4 floppy disks */
2043 { FDRIVE_DRV_120, 15, 80, 1, },
2044 { FDRIVE_DRV_120, 18, 80, 1, },
2045 { FDRIVE_DRV_120, 18, 82, 1, },
2046 { FDRIVE_DRV_120, 18, 83, 1, },
2047 { FDRIVE_DRV_120, 20, 80, 1, },
2048 /* 720 kB 5"1/4 floppy disks */
2049 { FDRIVE_DRV_120, 9, 80, 1, },
2050 { FDRIVE_DRV_120, 11, 80, 1, },
2051 /* 360 kB 5"1/4 floppy disks */
2052 { FDRIVE_DRV_120, 9, 40, 1, },
2053 { FDRIVE_DRV_120, 9, 40, 0, },
2054 { FDRIVE_DRV_120, 10, 41, 1, },
2055 { FDRIVE_DRV_120, 10, 42, 1, },
2056 /* 320 kB 5"1/4 floppy disks */
2057 { FDRIVE_DRV_120, 8, 40, 1, },
2058 { FDRIVE_DRV_120, 8, 40, 0, },
2059 /* 360 kB must match 5"1/4 better than 3"1/2... */
2060 { FDRIVE_DRV_144, 9, 80, 0, },
2061 /* end */
2062 { FDRIVE_DRV_NONE, -1, -1, 0, },
2063 };
2064
2065 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
2066 int *max_track, int *last_sect,
2067 FDriveType drive_in, FDriveType *drive)
2068 {
2069 const FDFormat *parse;
2070 uint64_t nb_sectors, size;
2071 int i, first_match, match;
2072
2073 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
2074 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
2075 /* User defined disk */
2076 } else {
2077 bdrv_get_geometry(bs, &nb_sectors);
2078 match = -1;
2079 first_match = -1;
2080 for (i = 0; ; i++) {
2081 parse = &fd_formats[i];
2082 if (parse->drive == FDRIVE_DRV_NONE) {
2083 break;
2084 }
2085 if (drive_in == parse->drive ||
2086 drive_in == FDRIVE_DRV_NONE) {
2087 size = (parse->max_head + 1) * parse->max_track *
2088 parse->last_sect;
2089 if (nb_sectors == size) {
2090 match = i;
2091 break;
2092 }
2093 if (first_match == -1) {
2094 first_match = i;
2095 }
2096 }
2097 }
2098 if (match == -1) {
2099 if (first_match == -1) {
2100 match = 0; /* first entry is the documented default format */
2101 } else {
2102 match = first_match;
2103 }
2104 parse = &fd_formats[match];
2105 }
2106 *nb_heads = parse->max_head + 1;
2107 *max_track = parse->max_track;
2108 *last_sect = parse->last_sect;
2109 *drive = parse->drive;
2110 }
2111 }
2112
2113 int bdrv_get_translation_hint(BlockDriverState *bs)
2114 {
2115 return bs->translation;
2116 }
2117
2118 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
2119 BlockErrorAction on_write_error)
2120 {
2121 bs->on_read_error = on_read_error;
2122 bs->on_write_error = on_write_error;
2123 }
2124
2125 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
2126 {
2127 return is_read ? bs->on_read_error : bs->on_write_error;
2128 }
2129
2130 int bdrv_is_read_only(BlockDriverState *bs)
2131 {
2132 return bs->read_only;
2133 }
2134
2135 int bdrv_is_sg(BlockDriverState *bs)
2136 {
2137 return bs->sg;
2138 }
2139
2140 int bdrv_enable_write_cache(BlockDriverState *bs)
2141 {
2142 return bs->enable_write_cache;
2143 }
2144
2145 int bdrv_is_encrypted(BlockDriverState *bs)
2146 {
2147 if (bs->backing_hd && bs->backing_hd->encrypted)
2148 return 1;
2149 return bs->encrypted;
2150 }
2151
2152 int bdrv_key_required(BlockDriverState *bs)
2153 {
2154 BlockDriverState *backing_hd = bs->backing_hd;
2155
2156 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2157 return 1;
2158 return (bs->encrypted && !bs->valid_key);
2159 }
2160
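/*
 * Set the key for an encrypted image. If there is an encrypted backing
 * file, its key is set first. Returns -EINVAL for unencrypted images and
 * -ENOMEDIUM if no driver is attached (or it cannot accept a key); on the
 * first valid key, the media change callback that was skipped on open is
 * emitted.
 */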
2161 int bdrv_set_key(BlockDriverState *bs, const char *key)
2162 {
2163 int ret;
2164 if (bs->backing_hd && bs->backing_hd->encrypted) {
2165 ret = bdrv_set_key(bs->backing_hd, key);
2166 if (ret < 0)
2167 return ret;
2168 if (!bs->encrypted)
2169 return 0;
2170 }
2171 if (!bs->encrypted) {
2172 return -EINVAL;
2173 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2174 return -ENOMEDIUM;
2175 }
2176 ret = bs->drv->bdrv_set_key(bs, key);
2177 if (ret < 0) {
2178 bs->valid_key = 0;
2179 } else if (!bs->valid_key) {
2180 bs->valid_key = 1;
2181 /* call the change callback now, we skipped it on open */
2182 bdrv_dev_change_media_cb(bs, true);
2183 }
2184 return ret;
2185 }
2186
2187 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
2188 {
2189 if (!bs->drv) {
2190 buf[0] = '\0';
2191 } else {
2192 pstrcpy(buf, buf_size, bs->drv->format_name);
2193 }
2194 }
2195
2196 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
2197 void *opaque)
2198 {
2199 BlockDriver *drv;
2200
2201 QLIST_FOREACH(drv, &bdrv_drivers, list) {
2202 it(opaque, drv->format_name);
2203 }
2204 }
2205
2206 BlockDriverState *bdrv_find(const char *name)
2207 {
2208 BlockDriverState *bs;
2209
2210 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2211 if (!strcmp(name, bs->device_name)) {
2212 return bs;
2213 }
2214 }
2215 return NULL;
2216 }
2217
2218 BlockDriverState *bdrv_next(BlockDriverState *bs)
2219 {
2220 if (!bs) {
2221 return QTAILQ_FIRST(&bdrv_states);
2222 }
2223 return QTAILQ_NEXT(bs, list);
2224 }
2225
2226 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
2227 {
2228 BlockDriverState *bs;
2229
2230 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2231 it(opaque, bs);
2232 }
2233 }
2234
2235 const char *bdrv_get_device_name(BlockDriverState *bs)
2236 {
2237 return bs->device_name;
2238 }
2239
2240 void bdrv_flush_all(void)
2241 {
2242 BlockDriverState *bs;
2243
2244 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2245 if (!bdrv_is_read_only(bs) && bdrv_is_inserted(bs)) {
2246 bdrv_flush(bs);
2247 }
2248 }
2249 }
2250
2251 int bdrv_has_zero_init(BlockDriverState *bs)
2252 {
2253 assert(bs->drv);
2254
2255 if (bs->drv->bdrv_has_zero_init) {
2256 return bs->drv->bdrv_has_zero_init(bs);
2257 }
2258
2259 return 1;
2260 }
2261
2262 typedef struct BdrvCoIsAllocatedData {
2263 BlockDriverState *bs;
2264 int64_t sector_num;
2265 int nb_sectors;
2266 int *pnum;
2267 int ret;
2268 bool done;
2269 } BdrvCoIsAllocatedData;
2270
2271 /*
2272 * Returns true iff the specified sector is present in the disk image. Drivers
2273 * not implementing the functionality are assumed to not support backing files,
2274 * hence all their sectors are reported as allocated.
2275 *
2276 * If 'sector_num' is beyond the end of the disk image the return value is 0
2277 * and 'pnum' is set to 0.
2278 *
2279 * 'pnum' is set to the number of sectors (including and immediately following
2280 * the specified sector) that are known to be in the same
2281 * allocated/unallocated state.
2282 *
2283 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2284 * beyond the end of the disk image it will be clamped.
2285 */
2286 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2287 int nb_sectors, int *pnum)
2288 {
2289 int64_t n;
2290
2291 if (sector_num >= bs->total_sectors) {
2292 *pnum = 0;
2293 return 0;
2294 }
2295
2296 n = bs->total_sectors - sector_num;
2297 if (n < nb_sectors) {
2298 nb_sectors = n;
2299 }
2300
2301 if (!bs->drv->bdrv_co_is_allocated) {
2302 *pnum = nb_sectors;
2303 return 1;
2304 }
2305
2306 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2307 }
2308
2309 /* Coroutine wrapper for bdrv_is_allocated() */
2310 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2311 {
2312 BdrvCoIsAllocatedData *data = opaque;
2313 BlockDriverState *bs = data->bs;
2314
2315 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2316 data->pnum);
2317 data->done = true;
2318 }
2319
2320 /*
2321 * Synchronous wrapper around bdrv_co_is_allocated().
2322 *
2323 * See bdrv_co_is_allocated() for details.
2324 */
2325 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2326 int *pnum)
2327 {
2328 Coroutine *co;
2329 BdrvCoIsAllocatedData data = {
2330 .bs = bs,
2331 .sector_num = sector_num,
2332 .nb_sectors = nb_sectors,
2333 .pnum = pnum,
2334 .done = false,
2335 };
2336
2337 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2338 qemu_coroutine_enter(co, &data);
2339 while (!data.done) {
2340 qemu_aio_wait();
2341 }
2342 return data.ret;
2343 }
2344
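/* Illustrative sketch (assumed caller, not from the original source):
 * walking an image's allocation map with the synchronous wrapper; note
 * that nb_sectors is an int, so very large images need chunking:
 *
 *     int64_t sector = 0;
 *     int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
 *     while (sector < total) {
 *         int num;
 *         int alloc = bdrv_is_allocated(bs, sector, total - sector, &num);
 *         // 'num' sectors from 'sector' on share the state 'alloc'
 *         sector += num;
 *     }
 */
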
2345 BlockInfoList *qmp_query_block(Error **errp)
2346 {
2347 BlockInfoList *head = NULL, *cur_item = NULL;
2348 BlockDriverState *bs;
2349
2350 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2351 BlockInfoList *info = g_malloc0(sizeof(*info));
2352
2353 info->value = g_malloc0(sizeof(*info->value));
2354 info->value->device = g_strdup(bs->device_name);
2355 info->value->type = g_strdup("unknown");
2356 info->value->locked = bdrv_dev_is_medium_locked(bs);
2357 info->value->removable = bdrv_dev_has_removable_media(bs);
2358
2359 if (bdrv_dev_has_removable_media(bs)) {
2360 info->value->has_tray_open = true;
2361 info->value->tray_open = bdrv_dev_is_tray_open(bs);
2362 }
2363
2364 if (bdrv_iostatus_is_enabled(bs)) {
2365 info->value->has_io_status = true;
2366 info->value->io_status = bs->iostatus;
2367 }
2368
2369 if (bs->drv) {
2370 info->value->has_inserted = true;
2371 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2372 info->value->inserted->file = g_strdup(bs->filename);
2373 info->value->inserted->ro = bs->read_only;
2374 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2375 info->value->inserted->encrypted = bs->encrypted;
2376 if (bs->backing_file[0]) {
2377 info->value->inserted->has_backing_file = true;
2378 info->value->inserted->backing_file = g_strdup(bs->backing_file);
2379 }
2380
2381 if (bs->io_limits_enabled) {
2382 info->value->inserted->bps =
2383 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2384 info->value->inserted->bps_rd =
2385 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2386 info->value->inserted->bps_wr =
2387 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2388 info->value->inserted->iops =
2389 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2390 info->value->inserted->iops_rd =
2391 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2392 info->value->inserted->iops_wr =
2393 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2394 }
2395 }
2396
2397 /* XXX: waiting for the qapi to support GSList */
2398 if (!cur_item) {
2399 head = cur_item = info;
2400 } else {
2401 cur_item->next = info;
2402 cur_item = info;
2403 }
2404 }
2405
2406 return head;
2407 }
2408
2409 /* Consider exposing this as a full-fledged QMP command */
2410 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
2411 {
2412 BlockStats *s;
2413
2414 s = g_malloc0(sizeof(*s));
2415
2416 if (bs->device_name[0]) {
2417 s->has_device = true;
2418 s->device = g_strdup(bs->device_name);
2419 }
2420
2421 s->stats = g_malloc0(sizeof(*s->stats));
2422 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2423 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2424 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2425 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2426 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2427 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2428 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2429 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2430 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2431
2432 if (bs->file) {
2433 s->has_parent = true;
2434 s->parent = qmp_query_blockstat(bs->file, NULL);
2435 }
2436
2437 return s;
2438 }
2439
2440 BlockStatsList *qmp_query_blockstats(Error **errp)
2441 {
2442 BlockStatsList *head = NULL, *cur_item = NULL;
2443 BlockDriverState *bs;
2444
2445 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2446 BlockStatsList *info = g_malloc0(sizeof(*info));
2447 info->value = qmp_query_blockstat(bs, NULL);
2448
2449 /* XXX: waiting for the qapi to support GSList */
2450 if (!cur_item) {
2451 head = cur_item = info;
2452 } else {
2453 cur_item->next = info;
2454 cur_item = info;
2455 }
2456 }
2457
2458 return head;
2459 }
2460
2461 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2462 {
2463 if (bs->backing_hd && bs->backing_hd->encrypted)
2464 return bs->backing_file;
2465 else if (bs->encrypted)
2466 return bs->filename;
2467 else
2468 return NULL;
2469 }
2470
2471 void bdrv_get_backing_filename(BlockDriverState *bs,
2472 char *filename, int filename_size)
2473 {
2474 pstrcpy(filename, filename_size, bs->backing_file);
2475 }
2476
2477 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
2478 const uint8_t *buf, int nb_sectors)
2479 {
2480 BlockDriver *drv = bs->drv;
2481 if (!drv)
2482 return -ENOMEDIUM;
2483 if (!drv->bdrv_write_compressed)
2484 return -ENOTSUP;
2485 if (bdrv_check_request(bs, sector_num, nb_sectors))
2486 return -EIO;
2487
2488 if (bs->dirty_bitmap) {
2489 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2490 }
2491
2492 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2493 }
2494
2495 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2496 {
2497 BlockDriver *drv = bs->drv;
2498 if (!drv)
2499 return -ENOMEDIUM;
2500 if (!drv->bdrv_get_info)
2501 return -ENOTSUP;
2502 memset(bdi, 0, sizeof(*bdi));
2503 return drv->bdrv_get_info(bs, bdi);
2504 }
2505
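/*
 * Save/load VM state inside the image (used by the savevm/loadvm machinery
 * for internal snapshots). The request is handled by the format driver if
 * it implements the hook and is otherwise delegated to the protocol layer
 * via bs->file, the same delegation pattern the snapshot functions below
 * use.
 */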
2506 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2507 int64_t pos, int size)
2508 {
2509 BlockDriver *drv = bs->drv;
2510 if (!drv)
2511 return -ENOMEDIUM;
2512 if (drv->bdrv_save_vmstate)
2513 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2514 if (bs->file)
2515 return bdrv_save_vmstate(bs->file, buf, pos, size);
2516 return -ENOTSUP;
2517 }
2518
2519 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2520 int64_t pos, int size)
2521 {
2522 BlockDriver *drv = bs->drv;
2523 if (!drv)
2524 return -ENOMEDIUM;
2525 if (drv->bdrv_load_vmstate)
2526 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2527 if (bs->file)
2528 return bdrv_load_vmstate(bs->file, buf, pos, size);
2529 return -ENOTSUP;
2530 }
2531
2532 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2533 {
2534 BlockDriver *drv = bs->drv;
2535
2536 if (!drv || !drv->bdrv_debug_event) {
2537 return;
2538 }
2539
2540 drv->bdrv_debug_event(bs, event);
2542 }
2543
2544 /**************************************************************/
2545 /* handling of snapshots */
2546
2547 int bdrv_can_snapshot(BlockDriverState *bs)
2548 {
2549 BlockDriver *drv = bs->drv;
2550 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
2551 return 0;
2552 }
2553
2554 if (!drv->bdrv_snapshot_create) {
2555 if (bs->file != NULL) {
2556 return bdrv_can_snapshot(bs->file);
2557 }
2558 return 0;
2559 }
2560
2561 return 1;
2562 }
2563
2564 int bdrv_is_snapshot(BlockDriverState *bs)
2565 {
2566 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2567 }
2568
2569 BlockDriverState *bdrv_snapshots(void)
2570 {
2571 BlockDriverState *bs;
2572
2573 if (bs_snapshots) {
2574 return bs_snapshots;
2575 }
2576
2577 bs = NULL;
2578 while ((bs = bdrv_next(bs))) {
2579 if (bdrv_can_snapshot(bs)) {
2580 bs_snapshots = bs;
2581 return bs;
2582 }
2583 }
2584 return NULL;
2585 }
2586
2587 int bdrv_snapshot_create(BlockDriverState *bs,
2588 QEMUSnapshotInfo *sn_info)
2589 {
2590 BlockDriver *drv = bs->drv;
2591 if (!drv)
2592 return -ENOMEDIUM;
2593 if (drv->bdrv_snapshot_create)
2594 return drv->bdrv_snapshot_create(bs, sn_info);
2595 if (bs->file)
2596 return bdrv_snapshot_create(bs->file, sn_info);
2597 return -ENOTSUP;
2598 }
2599
2600 int bdrv_snapshot_goto(BlockDriverState *bs,
2601 const char *snapshot_id)
2602 {
2603 BlockDriver *drv = bs->drv;
2604 int ret, open_ret;
2605
2606 if (!drv)
2607 return -ENOMEDIUM;
2608 if (drv->bdrv_snapshot_goto)
2609 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2610
2611 if (bs->file) {
2612 drv->bdrv_close(bs);
2613 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2614 open_ret = drv->bdrv_open(bs, bs->open_flags);
2615 if (open_ret < 0) {
2616 bdrv_delete(bs->file);
bs->file = NULL;
2617 bs->drv = NULL;
2618 return open_ret;
2619 }
2620 return ret;
2621 }
2622
2623 return -ENOTSUP;
2624 }
2625
2626 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2627 {
2628 BlockDriver *drv = bs->drv;
2629 if (!drv)
2630 return -ENOMEDIUM;
2631 if (drv->bdrv_snapshot_delete)
2632 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2633 if (bs->file)
2634 return bdrv_snapshot_delete(bs->file, snapshot_id);
2635 return -ENOTSUP;
2636 }
2637
2638 int bdrv_snapshot_list(BlockDriverState *bs,
2639 QEMUSnapshotInfo **psn_info)
2640 {
2641 BlockDriver *drv = bs->drv;
2642 if (!drv)
2643 return -ENOMEDIUM;
2644 if (drv->bdrv_snapshot_list)
2645 return drv->bdrv_snapshot_list(bs, psn_info);
2646 if (bs->file)
2647 return bdrv_snapshot_list(bs->file, psn_info);
2648 return -ENOTSUP;
2649 }
2650
2651 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2652 const char *snapshot_name)
2653 {
2654 BlockDriver *drv = bs->drv;
2655 if (!drv) {
2656 return -ENOMEDIUM;
2657 }
2658 if (!bs->read_only) {
2659 return -EINVAL;
2660 }
2661 if (drv->bdrv_snapshot_load_tmp) {
2662 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2663 }
2664 return -ENOTSUP;
2665 }
2666
2667 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
2668 const char *backing_file)
2669 {
2670 if (!bs->drv) {
2671 return NULL;
2672 }
2673
2674 if (bs->backing_hd) {
2675 if (strcmp(bs->backing_file, backing_file) == 0) {
2676 return bs->backing_hd;
2677 } else {
2678 return bdrv_find_backing_image(bs->backing_hd, backing_file);
2679 }
2680 }
2681
2682 return NULL;
2683 }
2684
2685 #define NB_SUFFIXES 4
2686
2687 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2688 {
2689 static const char suffixes[NB_SUFFIXES] = "KMGT";
2690 int64_t base;
2691 int i;
2692
2693 if (size <= 999) {
2694 snprintf(buf, buf_size, "%" PRId64, size);
2695 } else {
2696 base = 1024;
2697 for(i = 0; i < NB_SUFFIXES; i++) {
2698 if (size < (10 * base)) {
2699 snprintf(buf, buf_size, "%0.1f%c",
2700 (double)size / base,
2701 suffixes[i]);
2702 break;
2703 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
2704 snprintf(buf, buf_size, "%" PRId64 "%c",
2705 ((size + (base >> 1)) / base),
2706 suffixes[i]);
2707 break;
2708 }
2709 base = base * 1024;
2710 }
2711 }
2712 return buf;
2713 }
2714
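/* Worked examples for the formatting above (base 1024):
 *     999      -> "999"     (plain decimal below 1000)
 *     1536     -> "1.5K"    (one decimal while below 10 * base)
 *     10240    -> "10K"     (rounded integer up to 1000 * base)
 *     1024000  -> "1.0M"    (promoted to the next suffix)
 */
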
2715 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2716 {
2717 char buf1[128], date_buf[128], clock_buf[128];
2718 #ifdef _WIN32
2719 struct tm *ptm;
2720 #else
2721 struct tm tm;
2722 #endif
2723 time_t ti;
2724 int64_t secs;
2725
2726 if (!sn) {
2727 snprintf(buf, buf_size,
2728 "%-10s%-20s%7s%20s%15s",
2729 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2730 } else {
2731 ti = sn->date_sec;
2732 #ifdef _WIN32
2733 ptm = localtime(&ti);
2734 strftime(date_buf, sizeof(date_buf),
2735 "%Y-%m-%d %H:%M:%S", ptm);
2736 #else
2737 localtime_r(&ti, &tm);
2738 strftime(date_buf, sizeof(date_buf),
2739 "%Y-%m-%d %H:%M:%S", &tm);
2740 #endif
2741 secs = sn->vm_clock_nsec / 1000000000;
2742 snprintf(clock_buf, sizeof(clock_buf),
2743 "%02d:%02d:%02d.%03d",
2744 (int)(secs / 3600),
2745 (int)((secs / 60) % 60),
2746 (int)(secs % 60),
2747 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2748 snprintf(buf, buf_size,
2749 "%-10s%-20s%7s%20s%15s",
2750 sn->id_str, sn->name,
2751 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2752 date_buf,
2753 clock_buf);
2754 }
2755 return buf;
2756 }
2757
2758 /**************************************************************/
2759 /* async I/Os */
2760
2761 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2762 QEMUIOVector *qiov, int nb_sectors,
2763 BlockDriverCompletionFunc *cb, void *opaque)
2764 {
2765 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2766
2767 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2768 cb, opaque, false);
2769 }
2770
2771 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2772 QEMUIOVector *qiov, int nb_sectors,
2773 BlockDriverCompletionFunc *cb, void *opaque)
2774 {
2775 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2776
2777 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2778 cb, opaque, true);
2779 }
2780
2781
2782 typedef struct MultiwriteCB {
2783 int error;
2784 int num_requests;
2785 int num_callbacks;
2786 struct {
2787 BlockDriverCompletionFunc *cb;
2788 void *opaque;
2789 QEMUIOVector *free_qiov;
2790 void *free_buf;
2791 } callbacks[];
2792 } MultiwriteCB;
2793
2794 static void multiwrite_user_cb(MultiwriteCB *mcb)
2795 {
2796 int i;
2797
2798 for (i = 0; i < mcb->num_callbacks; i++) {
2799 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2800 if (mcb->callbacks[i].free_qiov) {
2801 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2802 }
2803 g_free(mcb->callbacks[i].free_qiov);
2804 qemu_vfree(mcb->callbacks[i].free_buf);
2805 }
2806 }
2807
2808 static void multiwrite_cb(void *opaque, int ret)
2809 {
2810 MultiwriteCB *mcb = opaque;
2811
2812 trace_multiwrite_cb(mcb, ret);
2813
2814 if (ret < 0 && !mcb->error) {
2815 mcb->error = ret;
2816 }
2817
2818 mcb->num_requests--;
2819 if (mcb->num_requests == 0) {
2820 multiwrite_user_cb(mcb);
2821 g_free(mcb);
2822 }
2823 }
2824
2825 static int multiwrite_req_compare(const void *a, const void *b)
2826 {
2827 const BlockRequest *req1 = a, *req2 = b;
2828
2829 /*
2830 * Note that we can't simply subtract req2->sector from req1->sector
2831 * here as that could overflow the return value.
2832 */
2833 if (req1->sector > req2->sector) {
2834 return 1;
2835 } else if (req1->sector < req2->sector) {
2836 return -1;
2837 } else {
2838 return 0;
2839 }
2840 }
2841
2842 /*
2843 * Takes a bunch of requests and tries to merge them. Returns the number of
2844 * requests that remain after merging.
2845 */
2846 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2847 int num_reqs, MultiwriteCB *mcb)
2848 {
2849 int i, outidx;
2850
2851 // Sort requests by start sector
2852 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2853
2854 // Check if adjacent requests touch the same clusters. If so, combine them,
2855 // filling up gaps with zero sectors.
2856 outidx = 0;
2857 for (i = 1; i < num_reqs; i++) {
2858 int merge = 0;
2859 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2860
2861 // This handles the cases that are valid for all block drivers, namely
2862 // exactly sequential writes and overlapping writes.
2863 if (reqs[i].sector <= oldreq_last) {
2864 merge = 1;
2865 }
2866
2867 // The block driver may decide that it makes sense to combine requests
2868 // even if there is a gap of some sectors between them. In this case,
2869 // the gap is filled with zeros (this is therefore only applicable to
2870 // as-yet-unused space in formats like qcow2).
2871 if (!merge && bs->drv->bdrv_merge_requests) {
2872 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2873 }
2874
2875 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2876 merge = 0;
2877 }
2878
2879 if (merge) {
2880 size_t size;
2881 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
2882 qemu_iovec_init(qiov,
2883 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2884
2885 // Add the first request to the merged one. If the requests are
2886 // overlapping, drop the last sectors of the first request.
2887 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2888 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2889
2890 // We might need to add some zeros between the two requests
2891 if (reqs[i].sector > oldreq_last) {
2892 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2893 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2894 memset(buf, 0, zero_bytes);
2895 qemu_iovec_add(qiov, buf, zero_bytes);
2896 mcb->callbacks[i].free_buf = buf;
2897 }
2898
2899 // Add the second request
2900 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2901
2902 reqs[outidx].nb_sectors = qiov->size >> 9;
2903 reqs[outidx].qiov = qiov;
2904
2905 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2906 } else {
2907 outidx++;
2908 reqs[outidx].sector = reqs[i].sector;
2909 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2910 reqs[outidx].qiov = reqs[i].qiov;
2911 }
2912 }
2913
2914 return outidx + 1;
2915 }
2916
2917 /*
2918 * Submit multiple AIO write requests at once.
2919 *
2920 * On success, the function returns 0 and all requests in the reqs array have
2921 * been submitted. On error, the function returns -1 and the individual
2922 * requests may or may not have been submitted yet. In particular, the
2923 * callback will be called for some requests but not for others. The
2924 * caller must check the error field of each BlockRequest to wait for the
2925 * right callbacks (if error != 0, no callback is called for that request).
2926 *
2927 * The implementation may modify the contents of the reqs array, e.g. to merge
2928 * requests. However, the fields opaque and error are left unmodified as they
2929 * are used to signal failure for a single request to the caller.
2930 */
2931 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2932 {
2933 MultiwriteCB *mcb;
2934 int i;
2935
2936 /* don't submit writes if we don't have a medium */
2937 if (bs->drv == NULL) {
2938 for (i = 0; i < num_reqs; i++) {
2939 reqs[i].error = -ENOMEDIUM;
2940 }
2941 return -1;
2942 }
2943
2944 if (num_reqs == 0) {
2945 return 0;
2946 }
2947
2948 // Create MultiwriteCB structure
2949 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2950 mcb->num_requests = 0;
2951 mcb->num_callbacks = num_reqs;
2952
2953 for (i = 0; i < num_reqs; i++) {
2954 mcb->callbacks[i].cb = reqs[i].cb;
2955 mcb->callbacks[i].opaque = reqs[i].opaque;
2956 }
2957
2958 // Check for mergeable requests
2959 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2960
2961 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2962
2963 /* Run the aio requests. */
2964 mcb->num_requests = num_reqs;
2965 for (i = 0; i < num_reqs; i++) {
2966 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2967 reqs[i].nb_sectors, multiwrite_cb, mcb);
2968 }
2969
2970 return 0;
2971 }
2972
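/* Illustrative sketch (assumed device-model caller; qiov0/qiov1, my_cb and
 * the opaque values are placeholders): batching two writes:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = qiov0,
 *           .cb = my_cb, .opaque = op0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = qiov1,
 *           .cb = my_cb, .opaque = op1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // inspect reqs[i].error: callbacks only fire where error == 0
 *     }
 *
 * The two adjacent requests above would be merged into a single request
 * by multiwrite_merge() before submission.
 */
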
2973 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2974 {
2975 acb->pool->cancel(acb);
2976 }
2977
2978 /* block I/O throttling */
2979 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
2980 bool is_write, double elapsed_time, uint64_t *wait)
2981 {
2982 uint64_t bps_limit = 0;
2983 double bytes_limit, bytes_base, bytes_res;
2984 double slice_time, wait_time;
2985
2986 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
2987 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2988 } else if (bs->io_limits.bps[is_write]) {
2989 bps_limit = bs->io_limits.bps[is_write];
2990 } else {
2991 if (wait) {
2992 *wait = 0;
2993 }
2994
2995 return false;
2996 }
2997
2998 slice_time = bs->slice_end - bs->slice_start;
2999 slice_time /= (NANOSECONDS_PER_SECOND);
3000 bytes_limit = bps_limit * slice_time;
3001 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
3002 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3003 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
3004 }
3005
3006 /* bytes_base: the number of bytes already read/written in this slice;
3007 * it is obtained from the accounting statistics.
3008 * bytes_res: the remaining bytes that still need to be read/written.
3009 * (bytes_base + bytes_res) / bps_limit: the total time needed to
3010 * complete reading/writing all of the data.
3011 */
3012 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
3013
3014 if (bytes_base + bytes_res <= bytes_limit) {
3015 if (wait) {
3016 *wait = 0;
3017 }
3018
3019 return false;
3020 }
3021
3022 /* Calc approx time to dispatch */
3023 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
3024
3025 /* When the I/O rate at runtime exceeds the limit, bs->slice_end needs
3026 * to be extended so that the current accounting information is kept
3027 * until the timer fires; the extension factor below was tuned
3028 * experimentally.
3029 */
3030 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3031 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3032 if (wait) {
3033 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3034 }
3035
3036 return true;
3037 }
3038
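/* Worked example for bdrv_exceed_bps_limits() (illustrative numbers,
 * assuming BLOCK_IO_SLICE_TIME is 100 ms): with bps_limit = 1,000,000 B/s
 * and a 0.5 s slice, bytes_limit = 500,000. If bytes_base = 450,000 and a
 * 100,000 byte request arrives at elapsed_time = 0.4 s, the budget is
 * exceeded and
 *     wait_time = 550,000 / 1,000,000 - 0.4 = 0.15 s
 * The "* BLOCK_IO_SLICE_TIME * 10" factor then converts that figure from
 * seconds to nanoseconds (10 * 100 ms = 1 s).
 */
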
3039 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
3040 double elapsed_time, uint64_t *wait)
3041 {
3042 uint64_t iops_limit = 0;
3043 double ios_limit, ios_base;
3044 double slice_time, wait_time;
3045
3046 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3047 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
3048 } else if (bs->io_limits.iops[is_write]) {
3049 iops_limit = bs->io_limits.iops[is_write];
3050 } else {
3051 if (wait) {
3052 *wait = 0;
3053 }
3054
3055 return false;
3056 }
3057
3058 slice_time = bs->slice_end - bs->slice_start;
3059 slice_time /= (NANOSECONDS_PER_SECOND);
3060 ios_limit = iops_limit * slice_time;
3061 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
3062 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3063 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
3064 }
3065
3066 if (ios_base + 1 <= ios_limit) {
3067 if (wait) {
3068 *wait = 0;
3069 }
3070
3071 return false;
3072 }
3073
3074 /* Calc approx time to dispatch */
3075 wait_time = (ios_base + 1) / iops_limit;
3076 if (wait_time > elapsed_time) {
3077 wait_time = wait_time - elapsed_time;
3078 } else {
3079 wait_time = 0;
3080 }
3081
3082 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3083 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3084 if (wait) {
3085 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3086 }
3087
3088 return true;
3089 }
3090
3091 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3092 bool is_write, int64_t *wait)
3093 {
3094 int64_t now, max_wait;
3095 uint64_t bps_wait = 0, iops_wait = 0;
3096 double elapsed_time;
3097 int bps_ret, iops_ret;
3098
3099 now = qemu_get_clock_ns(vm_clock);
3100 if ((bs->slice_start < now)
3101 && (bs->slice_end > now)) {
3102 bs->slice_end = now + bs->slice_time;
3103 } else {
3104 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3105 bs->slice_start = now;
3106 bs->slice_end = now + bs->slice_time;
3107
3108 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3109 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3110
3111 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3112 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3113 }
3114
3115 elapsed_time = now - bs->slice_start;
3116 elapsed_time /= (NANOSECONDS_PER_SECOND);
3117
3118 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3119 is_write, elapsed_time, &bps_wait);
3120 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3121 elapsed_time, &iops_wait);
3122 if (bps_ret || iops_ret) {
3123 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3124 if (wait) {
3125 *wait = max_wait;
3126 }
3127
3128 now = qemu_get_clock_ns(vm_clock);
3129 if (bs->slice_end < now + max_wait) {
3130 bs->slice_end = now + max_wait;
3131 }
3132
3133 return true;
3134 }
3135
3136 if (wait) {
3137 *wait = 0;
3138 }
3139
3140 return false;
3141 }
3142
3143 /**************************************************************/
3144 /* async block device emulation */
3145
3146 typedef struct BlockDriverAIOCBSync {
3147 BlockDriverAIOCB common;
3148 QEMUBH *bh;
3149 int ret;
3150 /* vector translation state */
3151 QEMUIOVector *qiov;
3152 uint8_t *bounce;
3153 int is_write;
3154 } BlockDriverAIOCBSync;
3155
3156 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3157 {
3158 BlockDriverAIOCBSync *acb =
3159 container_of(blockacb, BlockDriverAIOCBSync, common);
3160 qemu_bh_delete(acb->bh);
3161 acb->bh = NULL;
3162 qemu_aio_release(acb);
3163 }
3164
3165 static AIOPool bdrv_em_aio_pool = {
3166 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3167 .cancel = bdrv_aio_cancel_em,
3168 };
3169
3170 static void bdrv_aio_bh_cb(void *opaque)
3171 {
3172 BlockDriverAIOCBSync *acb = opaque;
3173
3174 if (!acb->is_write)
3175 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
3176 qemu_vfree(acb->bounce);
3177 acb->common.cb(acb->common.opaque, acb->ret);
3178 qemu_bh_delete(acb->bh);
3179 acb->bh = NULL;
3180 qemu_aio_release(acb);
3181 }
3182
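/*
 * Emulate AIO on top of a driver that only provides synchronous
 * bdrv_read/bdrv_write: the I/O happens immediately through a bounce
 * buffer, and the completion callback is deferred to a bottom half so
 * that callers still see the usual asynchronous completion semantics.
 */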
3183 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3184 int64_t sector_num,
3185 QEMUIOVector *qiov,
3186 int nb_sectors,
3187 BlockDriverCompletionFunc *cb,
3188 void *opaque,
3189 int is_write)
3190
3191 {
3192 BlockDriverAIOCBSync *acb;
3193
3194 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
3195 acb->is_write = is_write;
3196 acb->qiov = qiov;
3197 acb->bounce = qemu_blockalign(bs, qiov->size);
3198 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
3199
3200 if (is_write) {
3201 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
3202 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
3203 } else {
3204 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
3205 }
3206
3207 qemu_bh_schedule(acb->bh);
3208
3209 return &acb->common;
3210 }
3211
3212 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3213 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3214 BlockDriverCompletionFunc *cb, void *opaque)
3215 {
3216 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
3217 }
3218
3219 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3220 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3221 BlockDriverCompletionFunc *cb, void *opaque)
3222 {
3223 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3224 }
3225
3226
3227 typedef struct BlockDriverAIOCBCoroutine {
3228 BlockDriverAIOCB common;
3229 BlockRequest req;
3230 bool is_write;
3231 QEMUBH* bh;
3232 } BlockDriverAIOCBCoroutine;
3233
3234 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3235 {
3236 qemu_aio_flush();
3237 }
3238
3239 static AIOPool bdrv_em_co_aio_pool = {
3240 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3241 .cancel = bdrv_aio_co_cancel_em,
3242 };
3243
3244 static void bdrv_co_em_bh(void *opaque)
3245 {
3246 BlockDriverAIOCBCoroutine *acb = opaque;
3247
3248 acb->common.cb(acb->common.opaque, acb->req.error);
3249 qemu_bh_delete(acb->bh);
3250 qemu_aio_release(acb);
3251 }
3252
3253 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3254 static void coroutine_fn bdrv_co_do_rw(void *opaque)
3255 {
3256 BlockDriverAIOCBCoroutine *acb = opaque;
3257 BlockDriverState *bs = acb->common.bs;
3258
3259 if (!acb->is_write) {
3260 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
3261 acb->req.nb_sectors, acb->req.qiov, 0);
3262 } else {
3263 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
3264 acb->req.nb_sectors, acb->req.qiov, 0);
3265 }
3266
3267 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3268 qemu_bh_schedule(acb->bh);
3269 }
3270
3271 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3272 int64_t sector_num,
3273 QEMUIOVector *qiov,
3274 int nb_sectors,
3275 BlockDriverCompletionFunc *cb,
3276 void *opaque,
3277 bool is_write)
3278 {
3279 Coroutine *co;
3280 BlockDriverAIOCBCoroutine *acb;
3281
3282 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3283 acb->req.sector = sector_num;
3284 acb->req.nb_sectors = nb_sectors;
3285 acb->req.qiov = qiov;
3286 acb->is_write = is_write;
3287
3288 co = qemu_coroutine_create(bdrv_co_do_rw);
3289 qemu_coroutine_enter(co, acb);
3290
3291 return &acb->common;
3292 }
3293
3294 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
3295 {
3296 BlockDriverAIOCBCoroutine *acb = opaque;
3297 BlockDriverState *bs = acb->common.bs;
3298
3299 acb->req.error = bdrv_co_flush(bs);
3300 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3301 qemu_bh_schedule(acb->bh);
3302 }
3303
3304 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
3305 BlockDriverCompletionFunc *cb, void *opaque)
3306 {
3307 Coroutine *co;
3308 BlockDriverAIOCBCoroutine *acb;
3309 
3310 trace_bdrv_aio_flush(bs, opaque);
3311 
3312 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3313 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3314 qemu_coroutine_enter(co, acb);
3315
3316 return &acb->common;
3317 }
3318
3319 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3320 {
3321 BlockDriverAIOCBCoroutine *acb = opaque;
3322 BlockDriverState *bs = acb->common.bs;
3323
3324 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3325 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3326 qemu_bh_schedule(acb->bh);
3327 }
3328
3329 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3330 int64_t sector_num, int nb_sectors,
3331 BlockDriverCompletionFunc *cb, void *opaque)
3332 {
3333 Coroutine *co;
3334 BlockDriverAIOCBCoroutine *acb;
3335
3336 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3337
3338 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3339 acb->req.sector = sector_num;
3340 acb->req.nb_sectors = nb_sectors;
3341 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3342 qemu_coroutine_enter(co, acb);
3343
3344 return &acb->common;
3345 }
3346
3347 void bdrv_init(void)
3348 {
3349 module_call_init(MODULE_INIT_BLOCK);
3350 }
3351
3352 void bdrv_init_with_whitelist(void)
3353 {
3354 use_bdrv_whitelist = 1;
3355 bdrv_init();
3356 }
3357
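/*
 * AIOCBs are allocated from a per-pool free list: qemu_aio_release() does
 * not free the memory but pushes the AIOCB back onto pool->free_aiocb, so
 * qemu_aio_get() can recycle it for the next request of the same type.
 */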
3358 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3359 BlockDriverCompletionFunc *cb, void *opaque)
3360 {
3361 BlockDriverAIOCB *acb;
3362
3363 if (pool->free_aiocb) {
3364 acb = pool->free_aiocb;
3365 pool->free_aiocb = acb->next;
3366 } else {
3367 acb = g_malloc0(pool->aiocb_size);
3368 acb->pool = pool;
3369 }
3370 acb->bs = bs;
3371 acb->cb = cb;
3372 acb->opaque = opaque;
3373 return acb;
3374 }
3375
3376 void qemu_aio_release(void *p)
3377 {
3378 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3379 AIOPool *pool = acb->pool;
3380 acb->next = pool->free_aiocb;
3381 pool->free_aiocb = acb;
3382 }
3383
3384 /**************************************************************/
3385 /* Coroutine block device emulation */
3386
3387 typedef struct CoroutineIOCompletion {
3388 Coroutine *coroutine;
3389 int ret;
3390 } CoroutineIOCompletion;
3391
3392 static void bdrv_co_io_em_complete(void *opaque, int ret)
3393 {
3394 CoroutineIOCompletion *co = opaque;
3395
3396 co->ret = ret;
3397 qemu_coroutine_enter(co->coroutine, NULL);
3398 }
3399
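/*
 * Emulate coroutine I/O on top of a driver's AIO interface: submit the
 * request with bdrv_co_io_em_complete() as the completion callback, yield,
 * and let the callback re-enter the coroutine with the result in co.ret.
 */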
3400 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3401 int nb_sectors, QEMUIOVector *iov,
3402 bool is_write)
3403 {
3404 CoroutineIOCompletion co = {
3405 .coroutine = qemu_coroutine_self(),
3406 };
3407 BlockDriverAIOCB *acb;
3408
3409 if (is_write) {
3410 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3411 bdrv_co_io_em_complete, &co);
3412 } else {
3413 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3414 bdrv_co_io_em_complete, &co);
3415 }
3416
3417 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
3418 if (!acb) {
3419 return -EIO;
3420 }
3421 qemu_coroutine_yield();
3422
3423 return co.ret;
3424 }
3425
3426 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3427 int64_t sector_num, int nb_sectors,
3428 QEMUIOVector *iov)
3429 {
3430 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3431 }
3432
3433 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3434 int64_t sector_num, int nb_sectors,
3435 QEMUIOVector *iov)
3436 {
3437 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3438 }
3439
3440 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
3441 {
3442 RwCo *rwco = opaque;
3443
3444 rwco->ret = bdrv_co_flush(rwco->bs);
3445 }
3446
3447 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3448 {
3449 int ret;
3450
3451 if (!bs->drv) {
3452 return 0;
3453 }
3454
3455 /* Write back cached data to the OS even with cache=unsafe */
3456 if (bs->drv->bdrv_co_flush_to_os) {
3457 ret = bs->drv->bdrv_co_flush_to_os(bs);
3458 if (ret < 0) {
3459 return ret;
3460 }
3461 }
3462
3463 /* But don't actually force it to the disk with cache=unsafe */
3464 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3465 return 0;
3466 }
3467
3468 if (bs->drv->bdrv_co_flush_to_disk) {
3469 return bs->drv->bdrv_co_flush_to_disk(bs);
3470 } else if (bs->drv->bdrv_aio_flush) {
3471 BlockDriverAIOCB *acb;
3472 CoroutineIOCompletion co = {
3473 .coroutine = qemu_coroutine_self(),
3474 };
3475
3476 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3477 if (acb == NULL) {
3478 return -EIO;
3479 } else {
3480 qemu_coroutine_yield();
3481 return co.ret;
3482 }
3483 } else {
3484 /*
3485 * Some block drivers always operate in either writethrough or unsafe
3486 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3487 * know how the server works (because the behaviour is hardcoded or
3488 * depends on server-side configuration), so we can't ensure that
3489 * everything is safe on disk. Returning an error doesn't work because
3490 * that would break guests even if the server operates in writethrough
3491 * mode.
3492 *
3493 * Let's hope the user knows what he's doing.
3494 */
3495 return 0;
3496 }
3497 }
3498
3499 void bdrv_invalidate_cache(BlockDriverState *bs)
3500 {
3501 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3502 bs->drv->bdrv_invalidate_cache(bs);
3503 }
3504 }
3505
3506 void bdrv_invalidate_cache_all(void)
3507 {
3508 BlockDriverState *bs;
3509
3510 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3511 bdrv_invalidate_cache(bs);
3512 }
3513 }
3514
3515 int bdrv_flush(BlockDriverState *bs)
3516 {
3517 Coroutine *co;
3518 RwCo rwco = {
3519 .bs = bs,
3520 .ret = NOT_DONE,
3521 };
3522
3523 if (qemu_in_coroutine()) {
3524 /* Fast-path if already in coroutine context */
3525 bdrv_flush_co_entry(&rwco);
3526 } else {
3527 co = qemu_coroutine_create(bdrv_flush_co_entry);
3528 qemu_coroutine_enter(co, &rwco);
3529 while (rwco.ret == NOT_DONE) {
3530 qemu_aio_wait();
3531 }
3532 }
3533
3534 return rwco.ret;
3535 }
3536
3537 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3538 {
3539 RwCo *rwco = opaque;
3540
3541 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3542 }
3543
3544 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3545 int nb_sectors)
3546 {
3547 if (!bs->drv) {
3548 return -ENOMEDIUM;
3549 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3550 return -EIO;
3551 } else if (bs->read_only) {
3552 return -EROFS;
3553 } else if (bs->drv->bdrv_co_discard) {
3554 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3555 } else if (bs->drv->bdrv_aio_discard) {
3556 BlockDriverAIOCB *acb;
3557 CoroutineIOCompletion co = {
3558 .coroutine = qemu_coroutine_self(),
3559 };
3560
3561 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3562 bdrv_co_io_em_complete, &co);
3563 if (acb == NULL) {
3564 return -EIO;
3565 } else {
3566 qemu_coroutine_yield();
3567 return co.ret;
3568 }
3569 } else {
3570 return 0;
3571 }
3572 }
3573
3574 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3575 {
3576 Coroutine *co;
3577 RwCo rwco = {
3578 .bs = bs,
3579 .sector_num = sector_num,
3580 .nb_sectors = nb_sectors,
3581 .ret = NOT_DONE,
3582 };
3583
3584 if (qemu_in_coroutine()) {
3585 /* Fast-path if already in coroutine context */
3586 bdrv_discard_co_entry(&rwco);
3587 } else {
3588 co = qemu_coroutine_create(bdrv_discard_co_entry);
3589 qemu_coroutine_enter(co, &rwco);
3590 while (rwco.ret == NOT_DONE) {
3591 qemu_aio_wait();
3592 }
3593 }
3594
3595 return rwco.ret;
3596 }
3597
3598 /**************************************************************/
3599 /* removable device support */
3600
3601 /**
3602 * Return TRUE if the media is present
3603 */
3604 int bdrv_is_inserted(BlockDriverState *bs)
3605 {
3606 BlockDriver *drv = bs->drv;
3607
3608 if (!drv)
3609 return 0;
3610 if (!drv->bdrv_is_inserted)
3611 return 1;
3612 return drv->bdrv_is_inserted(bs);
3613 }
3614
3615 /**
3616 * Return whether the media changed since the last call to this
3617 * function, or -ENOTSUP if we don't know. Most drivers don't know.
3618 */
3619 int bdrv_media_changed(BlockDriverState *bs)
3620 {
3621 BlockDriver *drv = bs->drv;
3622
3623 if (drv && drv->bdrv_media_changed) {
3624 return drv->bdrv_media_changed(bs);
3625 }
3626 return -ENOTSUP;
3627 }
3628
3629 /**
3630 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3631 */
3632 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
3633 {
3634 BlockDriver *drv = bs->drv;
3635
3636 if (drv && drv->bdrv_eject) {
3637 drv->bdrv_eject(bs, eject_flag);
3638 }
3639
3640 if (bs->device_name[0] != '\0') {
3641 bdrv_emit_qmp_eject_event(bs, eject_flag);
3642 }
3643 }
3644
3645 /**
3646 * Lock or unlock the media (if it is locked, the user won't be able
3647 * to eject it manually).
3648 */
3649 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
3650 {
3651 BlockDriver *drv = bs->drv;
3652
3653 trace_bdrv_lock_medium(bs, locked);
3654
3655 if (drv && drv->bdrv_lock_medium) {
3656 drv->bdrv_lock_medium(bs, locked);
3657 }
3658 }
3659
3660 /* needed for generic scsi interface */
3661
3662 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3663 {
3664 BlockDriver *drv = bs->drv;
3665
3666 if (drv && drv->bdrv_ioctl)
3667 return drv->bdrv_ioctl(bs, req, buf);
3668 return -ENOTSUP;
3669 }
3670
3671 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3672 unsigned long int req, void *buf,
3673 BlockDriverCompletionFunc *cb, void *opaque)
3674 {
3675 BlockDriver *drv = bs->drv;
3676
3677 if (drv && drv->bdrv_aio_ioctl)
3678 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3679 return NULL;
3680 }
3681
3682 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3683 {
3684 bs->buffer_alignment = align;
3685 }
3686
3687 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3688 {
3689 return qemu_memalign((bs && bs->buffer_alignment) ?
bs->buffer_alignment : 512, size);
3690 }
3691
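/*
 * Dirty tracking bitmap: one bit covers BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors, so one byte covers eight chunks. The size calculation below
 * therefore rounds up:
 *     bytes = ceil(total_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK * 8))
 */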
3692 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3693 {
3694 int64_t bitmap_size;
3695
3696 bs->dirty_count = 0;
3697 if (enable) {
3698 if (!bs->dirty_bitmap) {
3699 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
3700 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
3701 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
3702
3703 bs->dirty_bitmap = g_malloc0(bitmap_size);
3704 }
3705 } else {
3706 if (bs->dirty_bitmap) {
3707 g_free(bs->dirty_bitmap);
3708 bs->dirty_bitmap = NULL;
3709 }
3710 }
3711 }
3712
3713 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
3714 {
3715 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
3716
3717 if (bs->dirty_bitmap &&
3718 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
3719 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
3720 (1UL << (chunk % (sizeof(unsigned long) * 8))));
3721 } else {
3722 return 0;
3723 }
3724 }
3725
3726 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
3727 int nr_sectors)
3728 {
3729 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
3730 }
3731
3732 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
3733 {
3734 return bs->dirty_count;
3735 }
3736
3737 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
3738 {
3739 assert(bs->in_use != in_use);
3740 bs->in_use = in_use;
3741 }
3742
3743 int bdrv_in_use(BlockDriverState *bs)
3744 {
3745 return bs->in_use;
3746 }
3747
3748 void bdrv_iostatus_enable(BlockDriverState *bs)
3749 {
3750 bs->iostatus_enabled = true;
3751 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3752 }
3753
3754 /* The I/O status is only enabled if the drive explicitly
3755 * enables it _and_ the VM is configured to stop on errors */
3756 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
3757 {
3758 return (bs->iostatus_enabled &&
3759 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
3760 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
3761 bs->on_read_error == BLOCK_ERR_STOP_ANY));
3762 }
3763
3764 void bdrv_iostatus_disable(BlockDriverState *bs)
3765 {
3766 bs->iostatus_enabled = false;
3767 }
3768
3769 void bdrv_iostatus_reset(BlockDriverState *bs)
3770 {
3771 if (bdrv_iostatus_is_enabled(bs)) {
3772 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3773 }
3774 }
3775
3776 /* XXX: Today this is set by device models because it makes the implementation
3777 quite simple. However, the block layer knows about the error, so it's
3778 possible to implement this without device models being involved */
3779 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
3780 {
3781 if (bdrv_iostatus_is_enabled(bs) &&
3782 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
3783 assert(error >= 0);
3784 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
3785 BLOCK_DEVICE_IO_STATUS_FAILED;
3786 }
3787 }
3788
3789 void
3790 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
3791 enum BlockAcctType type)
3792 {
3793 assert(type < BDRV_MAX_IOTYPE);
3794
3795 cookie->bytes = bytes;
3796 cookie->start_time_ns = get_clock();
3797 cookie->type = type;
3798 }
3799
3800 void
3801 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
3802 {
3803 assert(cookie->type < BDRV_MAX_IOTYPE);
3804
3805 bs->nr_bytes[cookie->type] += cookie->bytes;
3806 bs->nr_ops[cookie->type]++;
3807 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
3808 }
3809
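/* Illustrative sketch (assumed device-model caller): wrapping an I/O in
 * accounting so that query-blockstats reflects it:
 *
 *     BlockAcctCookie cookie;
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ...issue the read and wait for completion...
 *     bdrv_acct_done(bs, &cookie);
 */
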
3810 int bdrv_img_create(const char *filename, const char *fmt,
3811 const char *base_filename, const char *base_fmt,
3812 char *options, uint64_t img_size, int flags)
3813 {
3814 QEMUOptionParameter *param = NULL, *create_options = NULL;
3815 QEMUOptionParameter *backing_fmt, *backing_file, *size;
3816 BlockDriverState *bs = NULL;
3817 BlockDriver *drv, *proto_drv;
3818 BlockDriver *backing_drv = NULL;
3819 int ret = 0;
3820
3821 /* Find driver and parse its options */
3822 drv = bdrv_find_format(fmt);
3823 if (!drv) {
3824 error_report("Unknown file format '%s'", fmt);
3825 ret = -EINVAL;
3826 goto out;
3827 }
3828
3829 proto_drv = bdrv_find_protocol(filename);
3830 if (!proto_drv) {
3831 error_report("Unknown protocol '%s'", filename);
3832 ret = -EINVAL;
3833 goto out;
3834 }
3835
3836 create_options = append_option_parameters(create_options,
3837 drv->create_options);
3838 create_options = append_option_parameters(create_options,
3839 proto_drv->create_options);
3840
3841 /* Create parameter list with default values */
3842 param = parse_option_parameters("", create_options, param);
3843
3844 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
3845
3846 /* Parse -o options */
3847 if (options) {
3848 param = parse_option_parameters(options, create_options, param);
3849 if (param == NULL) {
3850 error_report("Invalid options for file format '%s'.", fmt);
3851 ret = -EINVAL;
3852 goto out;
3853 }
3854 }
3855
3856 if (base_filename) {
3857 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
3858 base_filename)) {
3859 error_report("Backing file not supported for file format '%s'",
3860 fmt);
3861 ret = -EINVAL;
3862 goto out;
3863 }
3864 }
3865
3866 if (base_fmt) {
3867 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
3868 error_report("Backing file format not supported for file "
3869 "format '%s'", fmt);
3870 ret = -EINVAL;
3871 goto out;
3872 }
3873 }
3874
3875 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
3876 if (backing_file && backing_file->value.s) {
3877 if (!strcmp(filename, backing_file->value.s)) {
3878 error_report("Error: Trying to create an image with the "
3879 "same filename as the backing file");
3880 ret = -EINVAL;
3881 goto out;
3882 }
3883 }
3884
3885 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
3886 if (backing_fmt && backing_fmt->value.s) {
3887 backing_drv = bdrv_find_format(backing_fmt->value.s);
3888 if (!backing_drv) {
3889 error_report("Unknown backing file format '%s'",
3890 backing_fmt->value.s);
3891 ret = -EINVAL;
3892 goto out;
3893 }
3894 }
3895
3896 // The size for the image must always be specified, with one exception:
3897 // If we are using a backing file, we can obtain the size from there
3898 size = get_option_parameter(param, BLOCK_OPT_SIZE);
3899 if (size && size->value.n == -1) {
3900 if (backing_file && backing_file->value.s) {
3901 uint64_t size;
3902 char buf[32];
3903
3904 bs = bdrv_new("");
3905
3906 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);
3907 if (ret < 0) {
3908 error_report("Could not open '%s'", backing_file->value.s);
3909 goto out;
3910 }
3911 bdrv_get_geometry(bs, &size);
3912 size *= 512;
3913
3914 snprintf(buf, sizeof(buf), "%" PRId64, size);
3915 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
3916 } else {
3917 error_report("Image creation needs a size parameter");
3918 ret = -EINVAL;
3919 goto out;
3920 }
3921 }
3922
3923 printf("Formatting '%s', fmt=%s ", filename, fmt);
3924 print_option_parameters(param);
3925 puts("");
3926
3927 ret = bdrv_create(drv, filename, param);
3928
3929 if (ret < 0) {
3930 if (ret == -ENOTSUP) {
3931 error_report("Formatting or formatting option not supported for "
3932 "file format '%s'", fmt);
3933 } else if (ret == -EFBIG) {
3934 error_report("The image size is too large for file format '%s'",
3935 fmt);
3936 } else {
3937 error_report("%s: error while creating %s: %s", filename, fmt,
3938 strerror(-ret));
3939 }
3940 }
3941
3942 out:
3943 free_option_parameters(create_options);
3944 free_option_parameters(param);
3945
3946 if (bs) {
3947 bdrv_delete(bs);
3948 }
3949
3950 return ret;
3951 }
3952
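/*
 * Expected block job life cycle: block_job_create() marks the device as
 * in use and attaches the job; the job implementation is expected to poll
 * block_job_is_cancelled() while it runs; block_job_complete() then runs
 * the user callback, detaches the job and clears the in-use flag.
 */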
3953 void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
3954 BlockDriverCompletionFunc *cb, void *opaque)
3955 {
3956 BlockJob *job;
3957
3958 if (bs->job || bdrv_in_use(bs)) {
3959 return NULL;
3960 }
3961 bdrv_set_in_use(bs, 1);
3962
3963 job = g_malloc0(job_type->instance_size);
3964 job->job_type = job_type;
3965 job->bs = bs;
3966 job->cb = cb;
3967 job->opaque = opaque;
3968 bs->job = job;
3969 return job;
3970 }
3971
3972 void block_job_complete(BlockJob *job, int ret)
3973 {
3974 BlockDriverState *bs = job->bs;
3975
3976 assert(bs->job == job);
3977 job->cb(job->opaque, ret);
3978 bs->job = NULL;
3979 g_free(job);
3980 bdrv_set_in_use(bs, 0);
3981 }
3982
3983 int block_job_set_speed(BlockJob *job, int64_t value)
3984 {
3985 if (!job->job_type->set_speed) {
3986 return -ENOTSUP;
3987 }
3988 return job->job_type->set_speed(job, value);
3989 }
3990
3991 void block_job_cancel(BlockJob *job)
3992 {
3993 job->cancelled = true;
3994 }
3995
3996 bool block_job_is_cancelled(BlockJob *job)
3997 {
3998 return job->cancelled;
3999 }