/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end = 0;
    bs->slice_time = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}

bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
        || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
        || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
        || io_limits->iops[BLOCK_IO_LIMIT_READ]
        || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
        || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}

static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Requests are kept in FIFO order to preserve each request's relative
     * timing.  The next throttled request is not dequeued until the current
     * request has been allowed to proceed, so if the current request still
     * exceeds the limits it is re-inserted at the head of the queue, and all
     * requests behind it remain in throttled_reqs.
     */

    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}

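/* Illustrative sketch (not part of the original file): how a caller could
 * configure the limits consumed by the functions above.  The call sequence
 * mirrors BlockIOLimit/bdrv_set_io_limits() usage later in this file; the
 * numeric values are arbitrary.
 *
 *     BlockIOLimit limits = {
 *         .bps  = { [BLOCK_IO_LIMIT_TOTAL] = 10 * 1024 * 1024 },
 *         .iops = { [BLOCK_IO_LIMIT_TOTAL] = 100 },
 *     };
 *     bdrv_set_io_limits(bs, &limits);   // records limits, flags throttling on
 *     bdrv_io_limits_enable(bs);         // arms the slice timer and queue
 */
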
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* If filename is absolute, just copy it to dest.  Otherwise, build a
   path to it by considering it relative to base_path.  URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

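/* Illustrative examples (not in the original source) of what the logic above
 * produces; the paths are hypothetical:
 *
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "snap.qcow2");
 *         => "/images/snap.qcow2"   (relative: directory of base_path kept)
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "/abs.img");
 *         => "/abs.img"             (absolute filename copied as-is)
 *     path_combine(dest, sizeof(dest), "nbd:host:1234", "snap.qcow2");
 *         => "nbd:snap.qcow2"       (everything after "<protocol>:" replaced)
 */
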
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

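/* Illustrative sketch (not part of the original file): the minimal shape of a
 * driver registration.  A format driver only fills in the callbacks it
 * implements; bdrv_register() patches in coroutine/AIO emulation for the
 * rest.  "mydrv", BDRVMyState and the callbacks are hypothetical names.
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .format_name   = "mydrv",
 *         .instance_size = sizeof(BDRVMyState),
 *         .bdrv_probe    = mydrv_probe,
 *         .bdrv_open     = mydrv_open,
 *         .bdrv_close    = mydrv_close,
 *         .bdrv_co_readv = mydrv_co_readv,
 *     };
 *
 *     static void bdrv_mydrv_init(void)
 *     {
 *         bdrv_register(&bdrv_mydrv);
 *     }
 *     block_init(bdrv_mydrv_init);
 */
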
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        /* don't leak the duplicated filename on the early-exit path */
        g_free(cco.filename);
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}

int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}

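/* Illustrative sketch (not in the original source): creating an image through
 * this API.  The option list comes from the driver's own create_options
 * template, mirroring the snapshot path in bdrv_open() below; the filename
 * and size are hypothetical.
 *
 *     BlockDriver *drv = bdrv_find_format("raw");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 64 * 1024 * 1024);
 *     int ret = bdrv_create_file("/tmp/test.img", opts);
 *     free_option_parameters(opts);
 */
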
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0 || close(fd)) {
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices.  By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

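/* Illustrative examples (not in the original source) of how the lookup above
 * resolves filenames; the host paths are hypothetical:
 *
 *     bdrv_find_protocol("/dev/cdrom")          => host CDROM driver (probed)
 *     bdrv_find_protocol("/var/lib/disk.img")   => "file" (no "<protocol>:")
 *     bdrv_find_protocol("nbd:localhost:10809") => driver whose protocol_name
 *                                                  is "nbd", if registered
 */
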
static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

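/* Quick reference (not in the original source) for the mapping implemented
 * above -- how each cache= mode translates to open flags:
 *
 *     "writethrough":  (none)                            flush after each write
 *     "writeback":     BDRV_O_CACHE_WB
 *     "none"/"off":    BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *     "directsync":    BDRV_O_NOCACHE
 *     "unsafe":        BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 */
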
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

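/* Illustrative sketch (not in the original source): because copy-on-read is a
 * reference count, independent users can nest enable/disable pairs safely.
 *
 *     bdrv_enable_copy_on_read(bs);   // user A (e.g. opened with COR flag)
 *     bdrv_enable_copy_on_read(bs);   // user B (e.g. a streaming job)
 *     bdrv_disable_copy_on_read(bs);  // user B done; COR still active
 *     bdrv_disable_copy_on_read(bs);  // user A done; COR now off
 */
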
/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        ret = drv->bdrv_file_open(bs, filename, open_flags);
    } else {
        ret = bdrv_file_open(&bs->file, filename, open_flags);
        if (ret >= 0) {
            ret = drv->bdrv_open(bs, open_flags);
        }
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    if (bs->file) {
        bdrv_delete(bs->file);
        bs->file = NULL;
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    bs->growable = 1;
    *pbs = bs;
    return 0;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    char tmp_filename[PATH_MAX];

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            return ret;
        }

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
        char backing_filename[PATH_MAX];
        int back_flags;
        BlockDriver *back_drv = NULL;

        bs->backing_hd = bdrv_new("");
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));

        if (bs->backing_format[0] != '\0') {
            back_drv = bdrv_find_format(bs->backing_format);
        }

        /* backing files always opened read-only */
        back_flags =
            flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
        if (bs->is_temporary) {
            bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
        } else {
            /* base image inherits from "parent" */
            bs->backing_hd->keep_read_only = bs->keep_read_only;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}

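/* Illustrative sketch (not part of the original file): the typical call
 * sequence for opening an image with automatic format probing.  The filename
 * is hypothetical.
 *
 *     BlockDriverState *bs = bdrv_new("drive0");
 *     int ret = bdrv_open(bs, "/images/disk.qcow2", BDRV_O_RDWR, NULL);
 *     if (ret < 0) {
 *         bdrv_delete(bs);    // open failed; free the empty state
 *     }
 *
 * Passing NULL for the driver lets find_image_format() probe the image;
 * passing BDRV_O_SNAPSHOT instead would redirect writes to a temporary qcow2
 * file backed by the original image, as implemented above.
 */
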
void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->drv) {
        if (bs->job) {
            block_job_cancel_sync(bs->job);
        }
        bdrv_drain_all();

        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }

        bdrv_dev_change_media_cb(bs, false);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can keep arriving
 * until the coroutine is complete.  Because of this, it is not possible to
 * drain a single device's I/O queue in isolation.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}

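/* Illustrative sketch (not in the original source): the shutdown ordering
 * implied by the comment above -- quiesce all in-flight requests first, then
 * flush, so nothing new is issued between the two steps.
 *
 *     bdrv_drain_all();    // wait for every tracked/throttled request
 *     bdrv_flush_all();    // then push completed writes to stable storage
 */
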
/* make a BlockDriverState anonymous by removing it from the bdrv_states
   list.  Also, clear device_name to prevent a double remove. */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o timing parameters */
    bs_dest->slice_time         = bs_src->slice_time;
    bs_dest->slice_start        = bs_src->slice_start;
    bs_dest->slice_end          = bs_src->slice_end;
    bs_dest->io_limits          = bs_src->io_limits;
    bs_dest->io_base            = bs_src->io_base;
    bs_dest->throttled_reqs     = bs_src->throttled_reqs;
    bs_dest->block_timer        = bs_src->block_timer;
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_count        = bs_src->dirty_count;
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}

/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old.  Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}

/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top.  Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}

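/* Illustrative sketch (not part of the original file): bdrv_append() is the
 * core of a live snapshot.  A new, anonymous overlay is opened with no
 * backing file attached, then spliced on top of the active chain; the
 * filename is hypothetical.
 *
 *     BlockDriverState *overlay = bdrv_new("");     // must stay anonymous
 *     ret = bdrv_open(overlay, "/images/overlay.qcow2",
 *                     BDRV_O_RDWR | BDRV_O_NO_BACKING, NULL);
 *     if (ret >= 0) {
 *         bdrv_append(overlay, bs);  // guest writes now go to the overlay
 *     }
 */
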
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);

    assert(bs != bs_snapshots);
    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}

void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               BlockQMPEventAction action, int is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}

/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred.  The results of
 * the check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}

#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    BlockDriver *backing_drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0, rw_ret = 0;
    uint8_t *buf;
    char filename[1024];
    BlockDriverState *bs_rw, *bs_ro;

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bs->backing_hd->keep_read_only) {
        return -EACCES;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    backing_drv = bs->backing_hd->drv;
    ro = bs->backing_hd->read_only;
    /* use pstrcpy rather than strncpy so the copy is NUL-terminated even
     * when the backing filename fills the buffer */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        /* re-open as RW */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_rw = bdrv_new("");
        rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
                           backing_drv);
        if (rw_ret < 0) {
            bdrv_delete(bs_rw);
            /* try to re-open read-only */
            bs_ro = bdrv_new("");
            ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                            backing_drv);
            if (ret < 0) {
                bdrv_delete(bs_ro);
                /* drive not functional anymore */
                bs->drv = NULL;
                return ret;
            }
            bs->backing_hd = bs_ro;
            return rw_ret;
        }
        bs->backing_hd = bs_rw;
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {

            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* re-open as RO */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_ro = bdrv_new("");
        ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                        backing_drv);
        if (ret < 0) {
            bdrv_delete(bs_ro);
            /* drive not functional anymore */
            bs->drv = NULL;
            return ret;
        }
        bs->backing_hd = bs_ro;
        bs->backing_hd->keep_read_only = 0;
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_commit(bs);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    bool is_write;
    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */
};

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

/**
 * Round a region to cluster boundaries
 */
static void round_to_clusters(BlockDriverState *bs,
                              int64_t sector_num, int nb_sectors,
                              int64_t *cluster_sector_num,
                              int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

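/* Worked example (not in the original source): with a 64 KiB cluster size,
 * c = 65536 / 512 = 128 sectors.  Rounding the request [130, 140):
 *
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)          = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128
 *
 * so the region grows to sectors [128, 256), i.e. exactly the one cluster
 * that the original request touches.
 */
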
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}

static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov, 0);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov, 0);
    }
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .qiov = &qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}

/* return < 0 if error.  See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

#define BITS_PER_LONG  (sizeof(unsigned long) * 8)

static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int dirty)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / BITS_PER_LONG;
        bit = start % BITS_PER_LONG;
        val = bs->dirty_bitmap[idx];
        if (dirty) {
            if (!(val & (1UL << bit))) {
                bs->dirty_count++;
                val |= 1UL << bit;
            }
        } else {
            if (val & (1UL << bit)) {
                bs->dirty_count--;
                val &= ~(1UL << bit);
            }
        }
        bs->dirty_bitmap[idx] = val;
    }
}

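/* Worked example (not in the original source): one bitmap bit covers one
 * dirty chunk of BDRV_SECTORS_PER_DIRTY_CHUNK sectors.  On a 64-bit host
 * (BITS_PER_LONG == 64) and assuming 2048-sector (1 MiB) chunks, marking
 * sector 5000 dirty:
 *
 *     chunk = 5000 / 2048 = 2    // third chunk
 *     idx   = 2 / 64      = 0    // first long in the bitmap
 *     bit   = 2 % 64      = 2    // bit 2 within that long
 *
 * dirty_count tracks the number of set bits, so it only changes when a bit
 * actually flips.
 */
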
/* Return < 0 if error.  Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write to a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
}

int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}

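/* Worked example (not in the original source): bdrv_pread(bs, 1000, buf, 2000)
 * with 512-byte sectors splits into three phases:
 *
 *     head:  bytes 1000..1023  (24 bytes)    read sector 1 via tmp_buf
 *     body:  bytes 1024..2559  (1536 bytes)  read sectors 2..4 in place
 *     tail:  bytes 2560..2999  (440 bytes)   read sector 5 via tmp_buf
 *
 * The head length comes from (512 - 1000) & 511 = 24, and the return value
 * on success is always the original count (2000 here).
 */
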
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf, buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return count1;
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

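/* Worked example (not in the original source): continuing the 64 KiB-cluster
 * example from round_to_clusters(), a copy-on-read of sectors [130, 140)
 * reads and writes the whole cluster [128, 256) through the bounce buffer,
 * then copies only the caller's slice out of it:
 *
 *     skip_bytes = (130 - 128) * 512 = 1024
 *     qemu_iovec_from_buf(qiov, 0, bounce_buffer + 1024, 10 * 512);
 */
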
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk read I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, false, nb_sectors);
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    /* TODO Emulate only part of misaligned requests instead of letting block
     * drivers return -ENOTSUP and emulate everything */

    /* First try the efficient write zeroes operation */
    if (drv->bdrv_co_write_zeroes) {
        ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
        if (ret != -ENOTSUP) {
            return ret;
        }
    }

    /* Fall back to bounce buffer if write zeroes is unsupported */
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);
    memset(iov.iov_base, 0, iov.iov_len);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);

    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk write I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, true, nb_sectors);
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }

    tracked_request_end(&req);

    return ret;
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE);
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}

2081 /**
2082 * Length of an allocated file in bytes. Sparse files are counted by their
2083 * actual allocated space. Return < 0 on error or if unknown.
2084 */
2085 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
2086 {
2087 BlockDriver *drv = bs->drv;
2088 if (!drv) {
2089 return -ENOMEDIUM;
2090 }
2091 if (drv->bdrv_get_allocated_file_size) {
2092 return drv->bdrv_get_allocated_file_size(bs);
2093 }
2094 if (bs->file) {
2095 return bdrv_get_allocated_file_size(bs->file);
2096 }
2097 return -ENOTSUP;
2098 }
2099
2100 /**
2101 * Length of a file in bytes. Return < 0 if error or unknown.
2102 */
2103 int64_t bdrv_getlength(BlockDriverState *bs)
2104 {
2105 BlockDriver *drv = bs->drv;
2106 if (!drv)
2107 return -ENOMEDIUM;
2108
2109 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
2110 if (drv->bdrv_getlength) {
2111 return drv->bdrv_getlength(bs);
2112 }
2113 }
2114 return bs->total_sectors * BDRV_SECTOR_SIZE;
2115 }
2116
2117 /* return 0 as number of sectors if no device present or error */
2118 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
2119 {
2120 int64_t length;
2121 length = bdrv_getlength(bs);
2122 if (length < 0)
2123 length = 0;
2124 else
2125 length = length >> BDRV_SECTOR_BITS;
2126 *nb_sectors_ptr = length;
2127 }
2128
2129 /* throttling disk io limits */
2130 void bdrv_set_io_limits(BlockDriverState *bs,
2131 BlockIOLimit *io_limits)
2132 {
2133 bs->io_limits = *io_limits;
2134 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
2135 }
2136
2137 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
2138 BlockErrorAction on_write_error)
2139 {
2140 bs->on_read_error = on_read_error;
2141 bs->on_write_error = on_write_error;
2142 }
2143
2144 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
2145 {
2146 return is_read ? bs->on_read_error : bs->on_write_error;
2147 }
2148
2149 int bdrv_is_read_only(BlockDriverState *bs)
2150 {
2151 return bs->read_only;
2152 }
2153
2154 int bdrv_is_sg(BlockDriverState *bs)
2155 {
2156 return bs->sg;
2157 }
2158
2159 int bdrv_enable_write_cache(BlockDriverState *bs)
2160 {
2161 return bs->enable_write_cache;
2162 }
2163
2164 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
2165 {
2166 bs->enable_write_cache = wce;
2167 }
2168
2169 int bdrv_is_encrypted(BlockDriverState *bs)
2170 {
2171 if (bs->backing_hd && bs->backing_hd->encrypted)
2172 return 1;
2173 return bs->encrypted;
2174 }
2175
2176 int bdrv_key_required(BlockDriverState *bs)
2177 {
2178 BlockDriverState *backing_hd = bs->backing_hd;
2179
2180 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2181 return 1;
2182 return (bs->encrypted && !bs->valid_key);
2183 }
2184
2185 int bdrv_set_key(BlockDriverState *bs, const char *key)
2186 {
2187 int ret;
2188 if (bs->backing_hd && bs->backing_hd->encrypted) {
2189 ret = bdrv_set_key(bs->backing_hd, key);
2190 if (ret < 0)
2191 return ret;
2192 if (!bs->encrypted)
2193 return 0;
2194 }
2195 if (!bs->encrypted) {
2196 return -EINVAL;
2197 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2198 return -ENOMEDIUM;
2199 }
2200 ret = bs->drv->bdrv_set_key(bs, key);
2201 if (ret < 0) {
2202 bs->valid_key = 0;
2203 } else if (!bs->valid_key) {
2204 bs->valid_key = 1;
2205 /* call the change callback now, we skipped it on open */
2206 bdrv_dev_change_media_cb(bs, true);
2207 }
2208 return ret;
2209 }
2210
2211 const char *bdrv_get_format_name(BlockDriverState *bs)
2212 {
2213 return bs->drv ? bs->drv->format_name : NULL;
2214 }
2215
2216 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
2217 void *opaque)
2218 {
2219 BlockDriver *drv;
2220
2221 QLIST_FOREACH(drv, &bdrv_drivers, list) {
2222 it(opaque, drv->format_name);
2223 }
2224 }
2225
2226 BlockDriverState *bdrv_find(const char *name)
2227 {
2228 BlockDriverState *bs;
2229
2230 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2231 if (!strcmp(name, bs->device_name)) {
2232 return bs;
2233 }
2234 }
2235 return NULL;
2236 }
2237
2238 BlockDriverState *bdrv_next(BlockDriverState *bs)
2239 {
2240 if (!bs) {
2241 return QTAILQ_FIRST(&bdrv_states);
2242 }
2243 return QTAILQ_NEXT(bs, list);
2244 }
2245
2246 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
2247 {
2248 BlockDriverState *bs;
2249
2250 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2251 it(opaque, bs);
2252 }
2253 }
2254
2255 const char *bdrv_get_device_name(BlockDriverState *bs)
2256 {
2257 return bs->device_name;
2258 }
2259
2260 int bdrv_get_flags(BlockDriverState *bs)
2261 {
2262 return bs->open_flags;
2263 }
2264
2265 void bdrv_flush_all(void)
2266 {
2267 BlockDriverState *bs;
2268
2269 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2270 bdrv_flush(bs);
2271 }
2272 }
2273
2274 int bdrv_has_zero_init(BlockDriverState *bs)
2275 {
2276 assert(bs->drv);
2277
2278 if (bs->drv->bdrv_has_zero_init) {
2279 return bs->drv->bdrv_has_zero_init(bs);
2280 }
2281
2282 return 1;
2283 }
2284
2285 typedef struct BdrvCoIsAllocatedData {
2286 BlockDriverState *bs;
2287 int64_t sector_num;
2288 int nb_sectors;
2289 int *pnum;
2290 int ret;
2291 bool done;
2292 } BdrvCoIsAllocatedData;
2293
2294 /*
2295 * Returns true iff the specified sector is present in the disk image. Drivers
2296 * not implementing the functionality are assumed to not support backing files,
2297 * hence all their sectors are reported as allocated.
2298 *
2299 * If 'sector_num' is beyond the end of the disk image the return value is 0
2300 * and 'pnum' is set to 0.
2301 *
2302 * 'pnum' is set to the number of sectors (including and immediately following
2303 * the specified sector) that are known to be in the same
2304 * allocated/unallocated state.
2305 *
2306 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2307 * beyond the end of the disk image it will be clamped.
2308 */
2309 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2310 int nb_sectors, int *pnum)
2311 {
2312 int64_t n;
2313
2314 if (sector_num >= bs->total_sectors) {
2315 *pnum = 0;
2316 return 0;
2317 }
2318
2319 n = bs->total_sectors - sector_num;
2320 if (n < nb_sectors) {
2321 nb_sectors = n;
2322 }
2323
2324 if (!bs->drv->bdrv_co_is_allocated) {
2325 *pnum = nb_sectors;
2326 return 1;
2327 }
2328
2329 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2330 }
2331
2332 /* Coroutine wrapper for bdrv_is_allocated() */
2333 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2334 {
2335 BdrvCoIsAllocatedData *data = opaque;
2336 BlockDriverState *bs = data->bs;
2337
2338 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2339 data->pnum);
2340 data->done = true;
2341 }
2342
2343 /*
2344 * Synchronous wrapper around bdrv_co_is_allocated().
2345 *
2346 * See bdrv_co_is_allocated() for details.
2347 */
2348 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2349 int *pnum)
2350 {
2351 Coroutine *co;
2352 BdrvCoIsAllocatedData data = {
2353 .bs = bs,
2354 .sector_num = sector_num,
2355 .nb_sectors = nb_sectors,
2356 .pnum = pnum,
2357 .done = false,
2358 };
2359
2360 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2361 qemu_coroutine_enter(co, &data);
2362 while (!data.done) {
2363 qemu_aio_wait();
2364 }
2365 return data.ret;
2366 }
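/*
 * Usage sketch (illustrative, assuming an open 'bs'): probe whether the
 * first MiB (2048 sectors of 512 bytes) is allocated in this image:
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated(bs, 0, 2048, &pnum);
 *     if (ret < 0) {
 *         // error
 *     } else if (ret) {
 *         // the first 'pnum' sectors are allocated in this image
 *     } else {
 *         // the first 'pnum' sectors fall through to the backing file
 *     }
 */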
2367
2368 /*
2369 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2370 *
2371 * Return true if the given sector is allocated in any image between
2372 * BASE (exclusive) and TOP (inclusive). BASE can be NULL to check if the
2373 * given sector is allocated in any image of the chain. Return false otherwise.
2374 *
2375 * 'pnum' is set to the number of sectors (including and immediately following
2376 * the specified sector) that are known to be in the same
2377 * allocated/unallocated state.
2378 *
2380 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
2381 BlockDriverState *base,
2382 int64_t sector_num,
2383 int nb_sectors, int *pnum)
2384 {
2385 BlockDriverState *intermediate;
2386 int ret, n = nb_sectors;
2387
2388 intermediate = top;
2389 while (intermediate && intermediate != base) {
2390 int pnum_inter;
2391 ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
2392 &pnum_inter);
2393 if (ret < 0) {
2394 return ret;
2395 } else if (ret) {
2396 *pnum = pnum_inter;
2397 return 1;
2398 }
2399
2400 /*
2401 * [sector_num, nb_sectors] is unallocated on top but intermediate
2402 * might have
2403 *
2404 * [sector_num+x, nb_sectors] allocated.
2405 */
2406 if (n > pnum_inter) {
2407 n = pnum_inter;
2408 }
2409
2410 intermediate = intermediate->backing_hd;
2411 }
2412
2413 *pnum = n;
2414 return 0;
2415 }
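/*
 * Usage sketch (coroutine context; 'top' assumed open): check whether any
 * image from 'top' down to, but not including, its immediate backing file
 * allocates the first 128 sectors:
 *
 *     int pnum;
 *     int ret = bdrv_co_is_allocated_above(top, top->backing_hd, 0, 128,
 *                                          &pnum);
 *     // ret == 1: 'top' itself allocates them (the base is excluded)
 *     // ret == 0: the first 'pnum' sectors come from backing_hd or below
 */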
2416
2417 BlockInfoList *qmp_query_block(Error **errp)
2418 {
2419 BlockInfoList *head = NULL, *cur_item = NULL;
2420 BlockDriverState *bs;
2421
2422 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2423 BlockInfoList *info = g_malloc0(sizeof(*info));
2424
2425 info->value = g_malloc0(sizeof(*info->value));
2426 info->value->device = g_strdup(bs->device_name);
2427 info->value->type = g_strdup("unknown");
2428 info->value->locked = bdrv_dev_is_medium_locked(bs);
2429 info->value->removable = bdrv_dev_has_removable_media(bs);
2430
2431 if (bdrv_dev_has_removable_media(bs)) {
2432 info->value->has_tray_open = true;
2433 info->value->tray_open = bdrv_dev_is_tray_open(bs);
2434 }
2435
2436 if (bdrv_iostatus_is_enabled(bs)) {
2437 info->value->has_io_status = true;
2438 info->value->io_status = bs->iostatus;
2439 }
2440
2441 if (bs->drv) {
2442 info->value->has_inserted = true;
2443 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2444 info->value->inserted->file = g_strdup(bs->filename);
2445 info->value->inserted->ro = bs->read_only;
2446 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2447 info->value->inserted->encrypted = bs->encrypted;
2448 if (bs->backing_file[0]) {
2449 info->value->inserted->has_backing_file = true;
2450 info->value->inserted->backing_file = g_strdup(bs->backing_file);
2451 }
2452
2453 if (bs->io_limits_enabled) {
2454 info->value->inserted->bps =
2455 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2456 info->value->inserted->bps_rd =
2457 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2458 info->value->inserted->bps_wr =
2459 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2460 info->value->inserted->iops =
2461 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2462 info->value->inserted->iops_rd =
2463 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2464 info->value->inserted->iops_wr =
2465 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2466 }
2467 }
2468
2469 /* XXX: waiting for the qapi to support GSList */
2470 if (!cur_item) {
2471 head = cur_item = info;
2472 } else {
2473 cur_item->next = info;
2474 cur_item = info;
2475 }
2476 }
2477
2478 return head;
2479 }
2480
2481 /* Consider exposing this as a full-fledged QMP command */
2482 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
2483 {
2484 BlockStats *s;
2485
2486 s = g_malloc0(sizeof(*s));
2487
2488 if (bs->device_name[0]) {
2489 s->has_device = true;
2490 s->device = g_strdup(bs->device_name);
2491 }
2492
2493 s->stats = g_malloc0(sizeof(*s->stats));
2494 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2495 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2496 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2497 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2498 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2499 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2500 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2501 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2502 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2503
2504 if (bs->file) {
2505 s->has_parent = true;
2506 s->parent = qmp_query_blockstat(bs->file, NULL);
2507 }
2508
2509 return s;
2510 }
2511
2512 BlockStatsList *qmp_query_blockstats(Error **errp)
2513 {
2514 BlockStatsList *head = NULL, *cur_item = NULL;
2515 BlockDriverState *bs;
2516
2517 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2518 BlockStatsList *info = g_malloc0(sizeof(*info));
2519 info->value = qmp_query_blockstat(bs, NULL);
2520
2521 /* XXX: waiting for the qapi to support GSList */
2522 if (!cur_item) {
2523 head = cur_item = info;
2524 } else {
2525 cur_item->next = info;
2526 cur_item = info;
2527 }
2528 }
2529
2530 return head;
2531 }
2532
2533 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2534 {
2535 if (bs->backing_hd && bs->backing_hd->encrypted)
2536 return bs->backing_file;
2537 else if (bs->encrypted)
2538 return bs->filename;
2539 else
2540 return NULL;
2541 }
2542
2543 void bdrv_get_backing_filename(BlockDriverState *bs,
2544 char *filename, int filename_size)
2545 {
2546 pstrcpy(filename, filename_size, bs->backing_file);
2547 }
2548
2549 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
2550 const uint8_t *buf, int nb_sectors)
2551 {
2552 BlockDriver *drv = bs->drv;
2553 if (!drv)
2554 return -ENOMEDIUM;
2555 if (!drv->bdrv_write_compressed)
2556 return -ENOTSUP;
2557 if (bdrv_check_request(bs, sector_num, nb_sectors))
2558 return -EIO;
2559
2560 if (bs->dirty_bitmap) {
2561 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2562 }
2563
2564 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2565 }
2566
2567 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2568 {
2569 BlockDriver *drv = bs->drv;
2570 if (!drv)
2571 return -ENOMEDIUM;
2572 if (!drv->bdrv_get_info)
2573 return -ENOTSUP;
2574 memset(bdi, 0, sizeof(*bdi));
2575 return drv->bdrv_get_info(bs, bdi);
2576 }
2577
2578 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2579 int64_t pos, int size)
2580 {
2581 BlockDriver *drv = bs->drv;
2582 if (!drv)
2583 return -ENOMEDIUM;
2584 if (drv->bdrv_save_vmstate)
2585 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2586 if (bs->file)
2587 return bdrv_save_vmstate(bs->file, buf, pos, size);
2588 return -ENOTSUP;
2589 }
2590
2591 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2592 int64_t pos, int size)
2593 {
2594 BlockDriver *drv = bs->drv;
2595 if (!drv)
2596 return -ENOMEDIUM;
2597 if (drv->bdrv_load_vmstate)
2598 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2599 if (bs->file)
2600 return bdrv_load_vmstate(bs->file, buf, pos, size);
2601 return -ENOTSUP;
2602 }
2603
2604 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2605 {
2606 BlockDriver *drv = bs->drv;
2607
2608 if (!drv || !drv->bdrv_debug_event) {
2609 return;
2610 }
2611
2612 drv->bdrv_debug_event(bs, event);
2614 }
2615
2616 /**************************************************************/
2617 /* handling of snapshots */
2618
2619 int bdrv_can_snapshot(BlockDriverState *bs)
2620 {
2621 BlockDriver *drv = bs->drv;
2622 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
2623 return 0;
2624 }
2625
2626 if (!drv->bdrv_snapshot_create) {
2627 if (bs->file != NULL) {
2628 return bdrv_can_snapshot(bs->file);
2629 }
2630 return 0;
2631 }
2632
2633 return 1;
2634 }
2635
2636 int bdrv_is_snapshot(BlockDriverState *bs)
2637 {
2638 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2639 }
2640
2641 BlockDriverState *bdrv_snapshots(void)
2642 {
2643 BlockDriverState *bs;
2644
2645 if (bs_snapshots) {
2646 return bs_snapshots;
2647 }
2648
2649 bs = NULL;
2650 while ((bs = bdrv_next(bs))) {
2651 if (bdrv_can_snapshot(bs)) {
2652 bs_snapshots = bs;
2653 return bs;
2654 }
2655 }
2656 return NULL;
2657 }
2658
2659 int bdrv_snapshot_create(BlockDriverState *bs,
2660 QEMUSnapshotInfo *sn_info)
2661 {
2662 BlockDriver *drv = bs->drv;
2663 if (!drv)
2664 return -ENOMEDIUM;
2665 if (drv->bdrv_snapshot_create)
2666 return drv->bdrv_snapshot_create(bs, sn_info);
2667 if (bs->file)
2668 return bdrv_snapshot_create(bs->file, sn_info);
2669 return -ENOTSUP;
2670 }
2671
2672 int bdrv_snapshot_goto(BlockDriverState *bs,
2673 const char *snapshot_id)
2674 {
2675 BlockDriver *drv = bs->drv;
2676 int ret, open_ret;
2677
2678 if (!drv)
2679 return -ENOMEDIUM;
2680 if (drv->bdrv_snapshot_goto)
2681 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2682
2683 if (bs->file) {
2684 drv->bdrv_close(bs);
2685 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2686 open_ret = drv->bdrv_open(bs, bs->open_flags);
2687 if (open_ret < 0) {
2688 bdrv_delete(bs->file);
2689 bs->drv = NULL;
2690 return open_ret;
2691 }
2692 return ret;
2693 }
2694
2695 return -ENOTSUP;
2696 }
2697
2698 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2699 {
2700 BlockDriver *drv = bs->drv;
2701 if (!drv)
2702 return -ENOMEDIUM;
2703 if (drv->bdrv_snapshot_delete)
2704 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2705 if (bs->file)
2706 return bdrv_snapshot_delete(bs->file, snapshot_id);
2707 return -ENOTSUP;
2708 }
2709
2710 int bdrv_snapshot_list(BlockDriverState *bs,
2711 QEMUSnapshotInfo **psn_info)
2712 {
2713 BlockDriver *drv = bs->drv;
2714 if (!drv)
2715 return -ENOMEDIUM;
2716 if (drv->bdrv_snapshot_list)
2717 return drv->bdrv_snapshot_list(bs, psn_info);
2718 if (bs->file)
2719 return bdrv_snapshot_list(bs->file, psn_info);
2720 return -ENOTSUP;
2721 }
2722
2723 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2724 const char *snapshot_name)
2725 {
2726 BlockDriver *drv = bs->drv;
2727 if (!drv) {
2728 return -ENOMEDIUM;
2729 }
2730 if (!bs->read_only) {
2731 return -EINVAL;
2732 }
2733 if (drv->bdrv_snapshot_load_tmp) {
2734 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2735 }
2736 return -ENOTSUP;
2737 }
2738
2739 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
2740 const char *backing_file)
2741 {
2742 if (!bs->drv) {
2743 return NULL;
2744 }
2745
2746 if (bs->backing_hd) {
2747 if (strcmp(bs->backing_file, backing_file) == 0) {
2748 return bs->backing_hd;
2749 } else {
2750 return bdrv_find_backing_image(bs->backing_hd, backing_file);
2751 }
2752 }
2753
2754 return NULL;
2755 }
2756
2757 #define NB_SUFFIXES 4
2758
2759 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2760 {
2761 static const char suffixes[NB_SUFFIXES] = "KMGT";
2762 int64_t base;
2763 int i;
2764
2765 if (size <= 999) {
2766 snprintf(buf, buf_size, "%" PRId64, size);
2767 } else {
2768 base = 1024;
2769 for(i = 0; i < NB_SUFFIXES; i++) {
2770 if (size < (10 * base)) {
2771 snprintf(buf, buf_size, "%0.1f%c",
2772 (double)size / base,
2773 suffixes[i]);
2774 break;
2775 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
2776 snprintf(buf, buf_size, "%" PRId64 "%c",
2777 ((size + (base >> 1)) / base),
2778 suffixes[i]);
2779 break;
2780 }
2781 base = base * 1024;
2782 }
2783 }
2784 return buf;
2785 }
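/*
 * Worked examples for the formatting above (input size in bytes):
 *   999          -> "999"   (printed as-is)
 *   1536         -> "1.5K"  (below 10*base, one decimal place)
 *   1048576      -> "1.0M"  (rolls over to the next suffix)
 *   536870912000 -> "500G"  (at or above 10*base, rounded integer)
 */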
2786
2787 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2788 {
2789 char buf1[128], date_buf[128], clock_buf[128];
2790 #ifdef _WIN32
2791 struct tm *ptm;
2792 #else
2793 struct tm tm;
2794 #endif
2795 time_t ti;
2796 int64_t secs;
2797
2798 if (!sn) {
2799 snprintf(buf, buf_size,
2800 "%-10s%-20s%7s%20s%15s",
2801 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2802 } else {
2803 ti = sn->date_sec;
2804 #ifdef _WIN32
2805 ptm = localtime(&ti);
2806 strftime(date_buf, sizeof(date_buf),
2807 "%Y-%m-%d %H:%M:%S", ptm);
2808 #else
2809 localtime_r(&ti, &tm);
2810 strftime(date_buf, sizeof(date_buf),
2811 "%Y-%m-%d %H:%M:%S", &tm);
2812 #endif
2813 secs = sn->vm_clock_nsec / 1000000000;
2814 snprintf(clock_buf, sizeof(clock_buf),
2815 "%02d:%02d:%02d.%03d",
2816 (int)(secs / 3600),
2817 (int)((secs / 60) % 60),
2818 (int)(secs % 60),
2819 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2820 snprintf(buf, buf_size,
2821 "%-10s%-20s%7s%20s%15s",
2822 sn->id_str, sn->name,
2823 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2824 date_buf,
2825 clock_buf);
2826 }
2827 return buf;
2828 }
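/*
 * With the format strings above the snapshot list prints as a fixed-width
 * table; an illustrative row under the header might look like:
 *
 *   ID        TAG                 VM SIZE                DATE       VM CLOCK
 *   1         pre-upgrade            1.5M 2012-01-01 12:00:00   00:00:03.412
 */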
2829
2830 /**************************************************************/
2831 /* async I/Os */
2832
2833 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2834 QEMUIOVector *qiov, int nb_sectors,
2835 BlockDriverCompletionFunc *cb, void *opaque)
2836 {
2837 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2838
2839 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2840 cb, opaque, false);
2841 }
2842
2843 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2844 QEMUIOVector *qiov, int nb_sectors,
2845 BlockDriverCompletionFunc *cb, void *opaque)
2846 {
2847 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2848
2849 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
2850 cb, opaque, true);
2851 }
2852
2853
2854 typedef struct MultiwriteCB {
2855 int error;
2856 int num_requests;
2857 int num_callbacks;
2858 struct {
2859 BlockDriverCompletionFunc *cb;
2860 void *opaque;
2861 QEMUIOVector *free_qiov;
2862 } callbacks[];
2863 } MultiwriteCB;
2864
2865 static void multiwrite_user_cb(MultiwriteCB *mcb)
2866 {
2867 int i;
2868
2869 for (i = 0; i < mcb->num_callbacks; i++) {
2870 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2871 if (mcb->callbacks[i].free_qiov) {
2872 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2873 }
2874 g_free(mcb->callbacks[i].free_qiov);
2875 }
2876 }
2877
2878 static void multiwrite_cb(void *opaque, int ret)
2879 {
2880 MultiwriteCB *mcb = opaque;
2881
2882 trace_multiwrite_cb(mcb, ret);
2883
2884 if (ret < 0 && !mcb->error) {
2885 mcb->error = ret;
2886 }
2887
2888 mcb->num_requests--;
2889 if (mcb->num_requests == 0) {
2890 multiwrite_user_cb(mcb);
2891 g_free(mcb);
2892 }
2893 }
2894
2895 static int multiwrite_req_compare(const void *a, const void *b)
2896 {
2897 const BlockRequest *req1 = a, *req2 = b;
2898
2899 /*
2900 * Note that we can't simply subtract req2->sector from req1->sector
2901 * here as that could overflow the return value.
2902 */
2903 if (req1->sector > req2->sector) {
2904 return 1;
2905 } else if (req1->sector < req2->sector) {
2906 return -1;
2907 } else {
2908 return 0;
2909 }
2910 }
2911
2912 /*
2913 * Takes a bunch of requests and tries to merge them. Returns the number of
2914 * requests that remain after merging.
2915 */
2916 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2917 int num_reqs, MultiwriteCB *mcb)
2918 {
2919 int i, outidx;
2920
2921 // Sort requests by start sector
2922 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2923
2924 // Check if adjacent requests touch the same clusters. If so, combine them,
2925 // filling up gaps with zero sectors.
2926 outidx = 0;
2927 for (i = 1; i < num_reqs; i++) {
2928 int merge = 0;
2929 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2930
2931 // Handle exactly sequential writes and overlapping writes.
2932 if (reqs[i].sector <= oldreq_last) {
2933 merge = 1;
2934 }
2935
2936 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2937 merge = 0;
2938 }
2939
2940 if (merge) {
2941 size_t size;
2942 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
2943 qemu_iovec_init(qiov,
2944 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2945
2946 // Add the first request to the merged one. If the requests are
2947 // overlapping, drop the last sectors of the first request.
2948 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2949 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
2950
2951 // We should not need to add any zeros between the two requests
2952 assert(reqs[i].sector <= oldreq_last);
2953
2954 // Add the second request
2955 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
2956
2957 reqs[outidx].nb_sectors = qiov->size >> 9;
2958 reqs[outidx].qiov = qiov;
2959
2960 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2961 } else {
2962 outidx++;
2963 reqs[outidx].sector = reqs[i].sector;
2964 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2965 reqs[outidx].qiov = reqs[i].qiov;
2966 }
2967 }
2968
2969 return outidx + 1;
2970 }
2971
2972 /*
2973 * Submit multiple AIO write requests at once.
2974 *
2975 * On success, the function returns 0 and all requests in the reqs array have
2976 * been submitted. In error case this function returns -1, and any of the
2977 * requests may or may not be submitted yet. In particular, this means that the
2978 * callback will be called for some of the requests, for others it won't. The
2979 * caller must check the error field of the BlockRequest to wait for the right
2980 * callbacks (if error != 0, no callback will be called).
2981 *
2982 * The implementation may modify the contents of the reqs array, e.g. to merge
2983 * requests. However, the fields opaque and error are left unmodified as they
2984 * are used to signal failure for a single request to the caller.
2985 */
2986 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2987 {
2988 MultiwriteCB *mcb;
2989 int i;
2990
2991 /* don't submit writes if we don't have a medium */
2992 if (bs->drv == NULL) {
2993 for (i = 0; i < num_reqs; i++) {
2994 reqs[i].error = -ENOMEDIUM;
2995 }
2996 return -1;
2997 }
2998
2999 if (num_reqs == 0) {
3000 return 0;
3001 }
3002
3003 // Create MultiwriteCB structure
3004 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
3005 mcb->num_requests = 0;
3006 mcb->num_callbacks = num_reqs;
3007
3008 for (i = 0; i < num_reqs; i++) {
3009 mcb->callbacks[i].cb = reqs[i].cb;
3010 mcb->callbacks[i].opaque = reqs[i].opaque;
3011 }
3012
3013 // Check for mergable requests
3014 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
3015
3016 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
3017
3018 /* Run the aio requests. */
3019 mcb->num_requests = num_reqs;
3020 for (i = 0; i < num_reqs; i++) {
3021 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
3022 reqs[i].nb_sectors, multiwrite_cb, mcb);
3023 }
3024
3025 return 0;
3026 }
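/*
 * Usage sketch (hypothetical device model; qiov1/qiov2, my_write_cb and the
 * opaque pointers are assumed): batch two adjacent writes so that
 * multiwrite_merge() can combine them before submission:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = req1_state },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov2,
 *           .cb = my_write_cb, .opaque = req2_state },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // inspect reqs[i].error; callbacks only fire where error == 0
 *     }
 */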
3027
3028 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
3029 {
3030 acb->pool->cancel(acb);
3031 }
3032
3033 /* block I/O throttling */
3034 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
3035 bool is_write, double elapsed_time, uint64_t *wait)
3036 {
3037 uint64_t bps_limit = 0;
3038 double bytes_limit, bytes_base, bytes_res;
3039 double slice_time, wait_time;
3040
3041 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3042 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
3043 } else if (bs->io_limits.bps[is_write]) {
3044 bps_limit = bs->io_limits.bps[is_write];
3045 } else {
3046 if (wait) {
3047 *wait = 0;
3048 }
3049
3050 return false;
3051 }
3052
3053 slice_time = bs->slice_end - bs->slice_start;
3054 slice_time /= (NANOSECONDS_PER_SECOND);
3055 bytes_limit = bps_limit * slice_time;
3056 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
3057 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3058 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
3059 }
3060
3061 /* bytes_base: the number of bytes already read/written, obtained
3062 * from the history statistics.
3063 * bytes_res: the remaining bytes of data which need to be read/written.
3064 * (bytes_base + bytes_res) / bps_limit: used to calculate
3065 * the total time for completing reading/writing all data.
3066 */
3067 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
3068
3069 if (bytes_base + bytes_res <= bytes_limit) {
3070 if (wait) {
3071 *wait = 0;
3072 }
3073
3074 return false;
3075 }
3076
3077 /* Calc approx time to dispatch */
3078 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
3079
3080 /* When the I/O rate at runtime exceeds the limits,
3081 * bs->slice_end needs to be extended so that the current statistics
3082 * can be kept until the timer fires; the scaling factor below was
3083 * tuned experimentally.
3084 */
3085 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3086 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3087 if (wait) {
3088 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3089 }
3090
3091 return true;
3092 }
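/*
 * Worked example (illustrative numbers): with bps_limit = 1048576 (1 MiB/s)
 * and a 0.1 s slice, bytes_limit is 104857.6.  If 65536 bytes were already
 * accounted in this slice (bytes_base) and the request adds another 65536
 * (bytes_res), then 131072 > 104857.6, so the request is throttled with
 * wait_time = 131072 / 1048576 - elapsed_time = 0.125 - elapsed_time.
 */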
3093
3094 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
3095 double elapsed_time, uint64_t *wait)
3096 {
3097 uint64_t iops_limit = 0;
3098 double ios_limit, ios_base;
3099 double slice_time, wait_time;
3100
3101 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3102 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
3103 } else if (bs->io_limits.iops[is_write]) {
3104 iops_limit = bs->io_limits.iops[is_write];
3105 } else {
3106 if (wait) {
3107 *wait = 0;
3108 }
3109
3110 return false;
3111 }
3112
3113 slice_time = bs->slice_end - bs->slice_start;
3114 slice_time /= (NANOSECONDS_PER_SECOND);
3115 ios_limit = iops_limit * slice_time;
3116 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
3117 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3118 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
3119 }
3120
3121 if (ios_base + 1 <= ios_limit) {
3122 if (wait) {
3123 *wait = 0;
3124 }
3125
3126 return false;
3127 }
3128
3129 /* Calc approx time to dispatch */
3130 wait_time = (ios_base + 1) / iops_limit;
3131 if (wait_time > elapsed_time) {
3132 wait_time = wait_time - elapsed_time;
3133 } else {
3134 wait_time = 0;
3135 }
3136
3137 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3138 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3139 if (wait) {
3140 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3141 }
3142
3143 return true;
3144 }
3145
3146 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3147 bool is_write, int64_t *wait)
3148 {
3149 int64_t now, max_wait;
3150 uint64_t bps_wait = 0, iops_wait = 0;
3151 double elapsed_time;
3152 int bps_ret, iops_ret;
3153
3154 now = qemu_get_clock_ns(vm_clock);
3155 if ((bs->slice_start < now)
3156 && (bs->slice_end > now)) {
3157 bs->slice_end = now + bs->slice_time;
3158 } else {
3159 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3160 bs->slice_start = now;
3161 bs->slice_end = now + bs->slice_time;
3162
3163 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3164 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3165
3166 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3167 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3168 }
3169
3170 elapsed_time = now - bs->slice_start;
3171 elapsed_time /= (NANOSECONDS_PER_SECOND);
3172
3173 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3174 is_write, elapsed_time, &bps_wait);
3175 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3176 elapsed_time, &iops_wait);
3177 if (bps_ret || iops_ret) {
3178 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3179 if (wait) {
3180 *wait = max_wait;
3181 }
3182
3183 now = qemu_get_clock_ns(vm_clock);
3184 if (bs->slice_end < now + max_wait) {
3185 bs->slice_end = now + max_wait;
3186 }
3187
3188 return true;
3189 }
3190
3191 if (wait) {
3192 *wait = 0;
3193 }
3194
3195 return false;
3196 }
3197
3198 /**************************************************************/
3199 /* async block device emulation */
3200
3201 typedef struct BlockDriverAIOCBSync {
3202 BlockDriverAIOCB common;
3203 QEMUBH *bh;
3204 int ret;
3205 /* vector translation state */
3206 QEMUIOVector *qiov;
3207 uint8_t *bounce;
3208 int is_write;
3209 } BlockDriverAIOCBSync;
3210
3211 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3212 {
3213 BlockDriverAIOCBSync *acb =
3214 container_of(blockacb, BlockDriverAIOCBSync, common);
3215 qemu_bh_delete(acb->bh);
3216 acb->bh = NULL;
3217 qemu_aio_release(acb);
3218 }
3219
3220 static AIOPool bdrv_em_aio_pool = {
3221 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3222 .cancel = bdrv_aio_cancel_em,
3223 };
3224
3225 static void bdrv_aio_bh_cb(void *opaque)
3226 {
3227 BlockDriverAIOCBSync *acb = opaque;
3228
3229 if (!acb->is_write)
3230 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
3231 qemu_vfree(acb->bounce);
3232 acb->common.cb(acb->common.opaque, acb->ret);
3233 qemu_bh_delete(acb->bh);
3234 acb->bh = NULL;
3235 qemu_aio_release(acb);
3236 }
3237
3238 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3239 int64_t sector_num,
3240 QEMUIOVector *qiov,
3241 int nb_sectors,
3242 BlockDriverCompletionFunc *cb,
3243 void *opaque,
3244 int is_write)
3245
3246 {
3247 BlockDriverAIOCBSync *acb;
3248
3249 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
3250 acb->is_write = is_write;
3251 acb->qiov = qiov;
3252 acb->bounce = qemu_blockalign(bs, qiov->size);
3253 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
3254
3255 if (is_write) {
3256 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
3257 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
3258 } else {
3259 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
3260 }
3261
3262 qemu_bh_schedule(acb->bh);
3263
3264 return &acb->common;
3265 }
3266
3267 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3268 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3269 BlockDriverCompletionFunc *cb, void *opaque)
3270 {
3271 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
3272 }
3273
3274 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3275 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3276 BlockDriverCompletionFunc *cb, void *opaque)
3277 {
3278 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3279 }
3280
3281
3282 typedef struct BlockDriverAIOCBCoroutine {
3283 BlockDriverAIOCB common;
3284 BlockRequest req;
3285 bool is_write;
3286 QEMUBH* bh;
3287 } BlockDriverAIOCBCoroutine;
3288
3289 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3290 {
3291 qemu_aio_flush();
3292 }
3293
3294 static AIOPool bdrv_em_co_aio_pool = {
3295 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3296 .cancel = bdrv_aio_co_cancel_em,
3297 };
3298
3299 static void bdrv_co_em_bh(void *opaque)
3300 {
3301 BlockDriverAIOCBCoroutine *acb = opaque;
3302
3303 acb->common.cb(acb->common.opaque, acb->req.error);
3304 qemu_bh_delete(acb->bh);
3305 qemu_aio_release(acb);
3306 }
3307
3308 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3309 static void coroutine_fn bdrv_co_do_rw(void *opaque)
3310 {
3311 BlockDriverAIOCBCoroutine *acb = opaque;
3312 BlockDriverState *bs = acb->common.bs;
3313
3314 if (!acb->is_write) {
3315 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
3316 acb->req.nb_sectors, acb->req.qiov, 0);
3317 } else {
3318 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
3319 acb->req.nb_sectors, acb->req.qiov, 0);
3320 }
3321
3322 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3323 qemu_bh_schedule(acb->bh);
3324 }
3325
3326 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3327 int64_t sector_num,
3328 QEMUIOVector *qiov,
3329 int nb_sectors,
3330 BlockDriverCompletionFunc *cb,
3331 void *opaque,
3332 bool is_write)
3333 {
3334 Coroutine *co;
3335 BlockDriverAIOCBCoroutine *acb;
3336
3337 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3338 acb->req.sector = sector_num;
3339 acb->req.nb_sectors = nb_sectors;
3340 acb->req.qiov = qiov;
3341 acb->is_write = is_write;
3342
3343 co = qemu_coroutine_create(bdrv_co_do_rw);
3344 qemu_coroutine_enter(co, acb);
3345
3346 return &acb->common;
3347 }
3348
3349 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
3350 {
3351 BlockDriverAIOCBCoroutine *acb = opaque;
3352 BlockDriverState *bs = acb->common.bs;
3353
3354 acb->req.error = bdrv_co_flush(bs);
3355 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3356 qemu_bh_schedule(acb->bh);
3357 }
3358
3359 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
3360 BlockDriverCompletionFunc *cb, void *opaque)
3361 {
3362 trace_bdrv_aio_flush(bs, opaque);
3363
3364 Coroutine *co;
3365 BlockDriverAIOCBCoroutine *acb;
3366
3367 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3368 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3369 qemu_coroutine_enter(co, acb);
3370
3371 return &acb->common;
3372 }
3373
3374 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3375 {
3376 BlockDriverAIOCBCoroutine *acb = opaque;
3377 BlockDriverState *bs = acb->common.bs;
3378
3379 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3380 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3381 qemu_bh_schedule(acb->bh);
3382 }
3383
3384 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3385 int64_t sector_num, int nb_sectors,
3386 BlockDriverCompletionFunc *cb, void *opaque)
3387 {
3388 Coroutine *co;
3389 BlockDriverAIOCBCoroutine *acb;
3390
3391 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3392
3393 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3394 acb->req.sector = sector_num;
3395 acb->req.nb_sectors = nb_sectors;
3396 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3397 qemu_coroutine_enter(co, acb);
3398
3399 return &acb->common;
3400 }
3401
3402 void bdrv_init(void)
3403 {
3404 module_call_init(MODULE_INIT_BLOCK);
3405 }
3406
3407 void bdrv_init_with_whitelist(void)
3408 {
3409 use_bdrv_whitelist = 1;
3410 bdrv_init();
3411 }
3412
3413 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3414 BlockDriverCompletionFunc *cb, void *opaque)
3415 {
3416 BlockDriverAIOCB *acb;
3417
3418 if (pool->free_aiocb) {
3419 acb = pool->free_aiocb;
3420 pool->free_aiocb = acb->next;
3421 } else {
3422 acb = g_malloc0(pool->aiocb_size);
3423 acb->pool = pool;
3424 }
3425 acb->bs = bs;
3426 acb->cb = cb;
3427 acb->opaque = opaque;
3428 return acb;
3429 }
3430
3431 void qemu_aio_release(void *p)
3432 {
3433 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3434 AIOPool *pool = acb->pool;
3435 acb->next = pool->free_aiocb;
3436 pool->free_aiocb = acb;
3437 }
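/*
 * Note on the pool above: qemu_aio_release() pushes the ACB onto the pool's
 * free list instead of freeing it, so a subsequent qemu_aio_get() on the
 * same pool reuses the allocation.
 */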
3438
3439 /**************************************************************/
3440 /* Coroutine block device emulation */
3441
3442 typedef struct CoroutineIOCompletion {
3443 Coroutine *coroutine;
3444 int ret;
3445 } CoroutineIOCompletion;
3446
3447 static void bdrv_co_io_em_complete(void *opaque, int ret)
3448 {
3449 CoroutineIOCompletion *co = opaque;
3450
3451 co->ret = ret;
3452 qemu_coroutine_enter(co->coroutine, NULL);
3453 }
3454
3455 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3456 int nb_sectors, QEMUIOVector *iov,
3457 bool is_write)
3458 {
3459 CoroutineIOCompletion co = {
3460 .coroutine = qemu_coroutine_self(),
3461 };
3462 BlockDriverAIOCB *acb;
3463
3464 if (is_write) {
3465 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3466 bdrv_co_io_em_complete, &co);
3467 } else {
3468 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3469 bdrv_co_io_em_complete, &co);
3470 }
3471
3472 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
3473 if (!acb) {
3474 return -EIO;
3475 }
3476 qemu_coroutine_yield();
3477
3478 return co.ret;
3479 }
3480
3481 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3482 int64_t sector_num, int nb_sectors,
3483 QEMUIOVector *iov)
3484 {
3485 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3486 }
3487
3488 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3489 int64_t sector_num, int nb_sectors,
3490 QEMUIOVector *iov)
3491 {
3492 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3493 }
3494
3495 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
3496 {
3497 RwCo *rwco = opaque;
3498
3499 rwco->ret = bdrv_co_flush(rwco->bs);
3500 }
3501
3502 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3503 {
3504 int ret;
3505
3506 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
3507 return 0;
3508 }
3509
3510 /* Write back cached data to the OS even with cache=unsafe */
3511 if (bs->drv->bdrv_co_flush_to_os) {
3512 ret = bs->drv->bdrv_co_flush_to_os(bs);
3513 if (ret < 0) {
3514 return ret;
3515 }
3516 }
3517
3518 /* But don't actually force it to the disk with cache=unsafe */
3519 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3520 return 0;
3521 }
3522
3523 if (bs->drv->bdrv_co_flush_to_disk) {
3524 ret = bs->drv->bdrv_co_flush_to_disk(bs);
3525 } else if (bs->drv->bdrv_aio_flush) {
3526 BlockDriverAIOCB *acb;
3527 CoroutineIOCompletion co = {
3528 .coroutine = qemu_coroutine_self(),
3529 };
3530
3531 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3532 if (acb == NULL) {
3533 ret = -EIO;
3534 } else {
3535 qemu_coroutine_yield();
3536 ret = co.ret;
3537 }
3538 } else {
3539 /*
3540 * Some block drivers always operate in either writethrough or unsafe
3541 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3542 * know how the server works (because the behaviour is hardcoded or
3543 * depends on server-side configuration), so we can't ensure that
3544 * everything is safe on disk. Returning an error doesn't work because
3545 * that would break guests even if the server operates in writethrough
3546 * mode.
3547 *
3548 * Let's hope the user knows what he's doing.
3549 */
3550 ret = 0;
3551 }
3552 if (ret < 0) {
3553 return ret;
3554 }
3555
3556 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3557 * in the case of cache=unsafe, so there are no useless flushes.
3558 */
3559 return bdrv_co_flush(bs->file);
3560 }
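/*
 * Flush ordering recap for bdrv_co_flush(): data is first written back to
 * the OS (even with cache=unsafe), then forced to disk unless
 * BDRV_O_NO_FLUSH is set, and finally the same sequence is applied
 * recursively to the underlying protocol via bs->file.
 */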
3561
3562 void bdrv_invalidate_cache(BlockDriverState *bs)
3563 {
3564 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3565 bs->drv->bdrv_invalidate_cache(bs);
3566 }
3567 }
3568
3569 void bdrv_invalidate_cache_all(void)
3570 {
3571 BlockDriverState *bs;
3572
3573 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3574 bdrv_invalidate_cache(bs);
3575 }
3576 }
3577
3578 void bdrv_clear_incoming_migration_all(void)
3579 {
3580 BlockDriverState *bs;
3581
3582 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3583 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
3584 }
3585 }
3586
3587 int bdrv_flush(BlockDriverState *bs)
3588 {
3589 Coroutine *co;
3590 RwCo rwco = {
3591 .bs = bs,
3592 .ret = NOT_DONE,
3593 };
3594
3595 if (qemu_in_coroutine()) {
3596 /* Fast-path if already in coroutine context */
3597 bdrv_flush_co_entry(&rwco);
3598 } else {
3599 co = qemu_coroutine_create(bdrv_flush_co_entry);
3600 qemu_coroutine_enter(co, &rwco);
3601 while (rwco.ret == NOT_DONE) {
3602 qemu_aio_wait();
3603 }
3604 }
3605
3606 return rwco.ret;
3607 }
3608
3609 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3610 {
3611 RwCo *rwco = opaque;
3612
3613 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3614 }
3615
3616 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3617 int nb_sectors)
3618 {
3619 if (!bs->drv) {
3620 return -ENOMEDIUM;
3621 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3622 return -EIO;
3623 } else if (bs->read_only) {
3624 return -EROFS;
3625 } else if (bs->drv->bdrv_co_discard) {
3626 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3627 } else if (bs->drv->bdrv_aio_discard) {
3628 BlockDriverAIOCB *acb;
3629 CoroutineIOCompletion co = {
3630 .coroutine = qemu_coroutine_self(),
3631 };
3632
3633 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3634 bdrv_co_io_em_complete, &co);
3635 if (acb == NULL) {
3636 return -EIO;
3637 } else {
3638 qemu_coroutine_yield();
3639 return co.ret;
3640 }
3641 } else {
3642 return 0;
3643 }
3644 }
3645
3646 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3647 {
3648 Coroutine *co;
3649 RwCo rwco = {
3650 .bs = bs,
3651 .sector_num = sector_num,
3652 .nb_sectors = nb_sectors,
3653 .ret = NOT_DONE,
3654 };
3655
3656 if (qemu_in_coroutine()) {
3657 /* Fast-path if already in coroutine context */
3658 bdrv_discard_co_entry(&rwco);
3659 } else {
3660 co = qemu_coroutine_create(bdrv_discard_co_entry);
3661 qemu_coroutine_enter(co, &rwco);
3662 while (rwco.ret == NOT_DONE) {
3663 qemu_aio_wait();
3664 }
3665 }
3666
3667 return rwco.ret;
3668 }
3669
3670 /**************************************************************/
3671 /* removable device support */
3672
3673 /**
3674 * Return TRUE if the media is present
3675 */
3676 int bdrv_is_inserted(BlockDriverState *bs)
3677 {
3678 BlockDriver *drv = bs->drv;
3679
3680 if (!drv)
3681 return 0;
3682 if (!drv->bdrv_is_inserted)
3683 return 1;
3684 return drv->bdrv_is_inserted(bs);
3685 }
3686
3687 /**
3688 * Return whether the media changed since the last call to this
3689 * function, or -ENOTSUP if we don't know. Most drivers don't know.
3690 */
3691 int bdrv_media_changed(BlockDriverState *bs)
3692 {
3693 BlockDriver *drv = bs->drv;
3694
3695 if (drv && drv->bdrv_media_changed) {
3696 return drv->bdrv_media_changed(bs);
3697 }
3698 return -ENOTSUP;
3699 }
3700
3701 /**
3702 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3703 */
3704 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
3705 {
3706 BlockDriver *drv = bs->drv;
3707
3708 if (drv && drv->bdrv_eject) {
3709 drv->bdrv_eject(bs, eject_flag);
3710 }
3711
3712 if (bs->device_name[0] != '\0') {
3713 bdrv_emit_qmp_eject_event(bs, eject_flag);
3714 }
3715 }
3716
3717 /**
3718 * Lock or unlock the media (if it is locked, the user won't be able
3719 * to eject it manually).
3720 */
3721 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
3722 {
3723 BlockDriver *drv = bs->drv;
3724
3725 trace_bdrv_lock_medium(bs, locked);
3726
3727 if (drv && drv->bdrv_lock_medium) {
3728 drv->bdrv_lock_medium(bs, locked);
3729 }
3730 }
3731
3732 /* needed for generic scsi interface */
3733
3734 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3735 {
3736 BlockDriver *drv = bs->drv;
3737
3738 if (drv && drv->bdrv_ioctl)
3739 return drv->bdrv_ioctl(bs, req, buf);
3740 return -ENOTSUP;
3741 }
3742
3743 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3744 unsigned long int req, void *buf,
3745 BlockDriverCompletionFunc *cb, void *opaque)
3746 {
3747 BlockDriver *drv = bs->drv;
3748
3749 if (drv && drv->bdrv_aio_ioctl)
3750 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3751 return NULL;
3752 }
3753
3754 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3755 {
3756 bs->buffer_alignment = align;
3757 }
3758
3759 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3760 {
3761 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
3762 }
3763
3764 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3765 {
3766 int64_t bitmap_size;
3767
3768 bs->dirty_count = 0;
3769 if (enable) {
3770 if (!bs->dirty_bitmap) {
3771 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
3772 BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
3773 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
3774
3775 bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
3776 }
3777 } else {
3778 if (bs->dirty_bitmap) {
3779 g_free(bs->dirty_bitmap);
3780 bs->dirty_bitmap = NULL;
3781 }
3782 }
3783 }
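/*
 * Sizing sketch for the bitmap above: one bit per dirty chunk, rounded up
 * to whole longs.  Assuming BDRV_SECTORS_PER_DIRTY_CHUNK is 2048 (1 MiB of
 * 512-byte sectors) and 64-bit longs, a 1 GiB image has 2097152 sectors,
 * i.e. 1024 chunks, which fit in 16 unsigned longs.
 */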
3784
3785 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
3786 {
3787 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
3788
3789 if (bs->dirty_bitmap &&
3790 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
3791 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
3792 (1UL << (chunk % (sizeof(unsigned long) * 8))));
3793 } else {
3794 return 0;
3795 }
3796 }
3797
3798 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
3799 int nr_sectors)
3800 {
3801 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
3802 }
3803
3804 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
3805 {
3806 return bs->dirty_count;
3807 }
3808
3809 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
3810 {
3811 assert(bs->in_use != in_use);
3812 bs->in_use = in_use;
3813 }
3814
3815 int bdrv_in_use(BlockDriverState *bs)
3816 {
3817 return bs->in_use;
3818 }
3819
3820 void bdrv_iostatus_enable(BlockDriverState *bs)
3821 {
3822 bs->iostatus_enabled = true;
3823 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3824 }
3825
3826 /* The I/O status is only enabled if the drive explicitly
3827 * enables it _and_ the VM is configured to stop on errors */
3828 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
3829 {
3830 return (bs->iostatus_enabled &&
3831 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
3832 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
3833 bs->on_read_error == BLOCK_ERR_STOP_ANY));
3834 }
3835
3836 void bdrv_iostatus_disable(BlockDriverState *bs)
3837 {
3838 bs->iostatus_enabled = false;
3839 }
3840
3841 void bdrv_iostatus_reset(BlockDriverState *bs)
3842 {
3843 if (bdrv_iostatus_is_enabled(bs)) {
3844 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
3845 }
3846 }
3847
3848 /* XXX: Today this is set by device models because it makes the implementation
3849 quite simple. However, the block layer knows about the error, so it's
3850 possible to implement this without device models being involved */
3851 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
3852 {
3853 if (bdrv_iostatus_is_enabled(bs) &&
3854 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
3855 assert(error >= 0);
3856 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
3857 BLOCK_DEVICE_IO_STATUS_FAILED;
3858 }
3859 }
3860
3861 void
3862 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
3863 enum BlockAcctType type)
3864 {
3865 assert(type < BDRV_MAX_IOTYPE);
3866
3867 cookie->bytes = bytes;
3868 cookie->start_time_ns = get_clock();
3869 cookie->type = type;
3870 }
3871
3872 void
3873 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
3874 {
3875 assert(cookie->type < BDRV_MAX_IOTYPE);
3876
3877 bs->nr_bytes[cookie->type] += cookie->bytes;
3878 bs->nr_ops[cookie->type]++;
3879 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
3880 }
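/*
 * Usage sketch (hypothetical device model): bracket each guest request with
 * start/done so the byte, op and latency counters above stay consistent:
 *
 *     BlockAcctCookie cookie;
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     // ... issue the read and wait for completion ...
 *     bdrv_acct_done(bs, &cookie);
 */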
3881
3882 int bdrv_img_create(const char *filename, const char *fmt,
3883 const char *base_filename, const char *base_fmt,
3884 char *options, uint64_t img_size, int flags)
3885 {
3886 QEMUOptionParameter *param = NULL, *create_options = NULL;
3887 QEMUOptionParameter *backing_fmt, *backing_file, *size;
3888 BlockDriverState *bs = NULL;
3889 BlockDriver *drv, *proto_drv;
3890 BlockDriver *backing_drv = NULL;
3891 int ret = 0;
3892
3893 /* Find driver and parse its options */
3894 drv = bdrv_find_format(fmt);
3895 if (!drv) {
3896 error_report("Unknown file format '%s'", fmt);
3897 ret = -EINVAL;
3898 goto out;
3899 }
3900
3901 proto_drv = bdrv_find_protocol(filename);
3902 if (!proto_drv) {
3903 error_report("Unknown protocol '%s'", filename);
3904 ret = -EINVAL;
3905 goto out;
3906 }
3907
3908 create_options = append_option_parameters(create_options,
3909 drv->create_options);
3910 create_options = append_option_parameters(create_options,
3911 proto_drv->create_options);
3912
3913 /* Create parameter list with default values */
3914 param = parse_option_parameters("", create_options, param);
3915
3916 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
3917
3918 /* Parse -o options */
3919 if (options) {
3920 param = parse_option_parameters(options, create_options, param);
3921 if (param == NULL) {
3922 error_report("Invalid options for file format '%s'.", fmt);
3923 ret = -EINVAL;
3924 goto out;
3925 }
3926 }
3927
3928 if (base_filename) {
3929 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
3930 base_filename)) {
3931 error_report("Backing file not supported for file format '%s'",
3932 fmt);
3933 ret = -EINVAL;
3934 goto out;
3935 }
3936 }
3937
3938 if (base_fmt) {
3939 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
3940 error_report("Backing file format not supported for file "
3941 "format '%s'", fmt);
3942 ret = -EINVAL;
3943 goto out;
3944 }
3945 }
3946
3947 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
3948 if (backing_file && backing_file->value.s) {
3949 if (!strcmp(filename, backing_file->value.s)) {
3950 error_report("Error: Trying to create an image with the "
3951 "same filename as the backing file");
3952 ret = -EINVAL;
3953 goto out;
3954 }
3955 }
3956
3957 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
3958 if (backing_fmt && backing_fmt->value.s) {
3959 backing_drv = bdrv_find_format(backing_fmt->value.s);
3960 if (!backing_drv) {
3961 error_report("Unknown backing file format '%s'",
3962 backing_fmt->value.s);
3963 ret = -EINVAL;
3964 goto out;
3965 }
3966 }
3967
3968 // The size for the image must always be specified, with one exception:
3969 // If we are using a backing file, we can obtain the size from there
3970 size = get_option_parameter(param, BLOCK_OPT_SIZE);
3971 if (size && size->value.n == -1) {
3972 if (backing_file && backing_file->value.s) {
3973 uint64_t size;
3974 char buf[32];
3975 int back_flags;
3976
3977 /* backing files are always opened read-only */
3978 back_flags =
3979 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
3980
3981 bs = bdrv_new("");
3982
3983 ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
3984 if (ret < 0) {
3985 error_report("Could not open '%s'", backing_file->value.s);
3986 goto out;
3987 }
3988 bdrv_get_geometry(bs, &size);
3989 size *= 512;
3990
3991 snprintf(buf, sizeof(buf), "%" PRId64, size);
3992 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
3993 } else {
3994 error_report("Image creation needs a size parameter");
3995 ret = -EINVAL;
3996 goto out;
3997 }
3998 }
3999
4000 printf("Formatting '%s', fmt=%s ", filename, fmt);
4001 print_option_parameters(param);
4002 puts("");
4003
4004 ret = bdrv_create(drv, filename, param);
4005
4006 if (ret < 0) {
4007 if (ret == -ENOTSUP) {
4008 error_report("Formatting or formatting option not supported for "
4009 "file format '%s'", fmt);
4010 } else if (ret == -EFBIG) {
4011 error_report("The image size is too large for file format '%s'",
4012 fmt);
4013 } else {
4014 error_report("%s: error while creating %s: %s", filename, fmt,
4015 strerror(-ret));
4016 }
4017 }
4018
4019 out:
4020 free_option_parameters(create_options);
4021 free_option_parameters(param);
4022
4023 if (bs) {
4024 bdrv_delete(bs);
4025 }
4026
4027 return ret;
4028 }
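/*
 * Usage sketch (illustrative arguments): create a 1 GiB qcow2 image with
 * default creation options and no backing file:
 *
 *     ret = bdrv_img_create("disk.qcow2", "qcow2", NULL, NULL, NULL,
 *                           1073741824, 0);
 */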
4029
4030 void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
4031 int64_t speed, BlockDriverCompletionFunc *cb,
4032 void *opaque, Error **errp)
4033 {
4034 BlockJob *job;
4035
4036 if (bs->job || bdrv_in_use(bs)) {
4037 error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
4038 return NULL;
4039 }
4040 bdrv_set_in_use(bs, 1);
4041
4042 job = g_malloc0(job_type->instance_size);
4043 job->job_type = job_type;
4044 job->bs = bs;
4045 job->cb = cb;
4046 job->opaque = opaque;
4047 job->busy = true;
4048 bs->job = job;
4049
4050 /* Only set speed when necessary to avoid NotSupported error */
4051 if (speed != 0) {
4052 Error *local_err = NULL;
4053
4054 block_job_set_speed(job, speed, &local_err);
4055 if (error_is_set(&local_err)) {
4056 bs->job = NULL;
4057 g_free(job);
4058 bdrv_set_in_use(bs, 0);
4059 error_propagate(errp, local_err);
4060 return NULL;
4061 }
4062 }
4063 return job;
4064 }
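/*
 * Lifecycle sketch (hypothetical job type and coroutine; names assumed):
 * a long-running operation wraps its state in a BlockJob and later reports
 * through block_job_complete():
 *
 *     MyJob *s = block_job_create(&my_job_type, bs, 0, my_cb, opaque, errp);
 *     if (s) {
 *         s->common.co = qemu_coroutine_create(my_job_run);
 *         qemu_coroutine_enter(s->common.co, s);
 *     }
 */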
4065
4066 void block_job_complete(BlockJob *job, int ret)
4067 {
4068 BlockDriverState *bs = job->bs;
4069
4070 assert(bs->job == job);
4071 job->cb(job->opaque, ret);
4072 bs->job = NULL;
4073 g_free(job);
4074 bdrv_set_in_use(bs, 0);
4075 }
4076
4077 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
4078 {
4079 Error *local_err = NULL;
4080
4081 if (!job->job_type->set_speed) {
4082 error_set(errp, QERR_NOT_SUPPORTED);
4083 return;
4084 }
4085 job->job_type->set_speed(job, speed, &local_err);
4086 if (error_is_set(&local_err)) {
4087 error_propagate(errp, local_err);
4088 return;
4089 }
4090
4091 job->speed = speed;
4092 }
4093
4094 void block_job_cancel(BlockJob *job)
4095 {
4096 job->cancelled = true;
4097 if (job->co && !job->busy) {
4098 qemu_coroutine_enter(job->co, NULL);
4099 }
4100 }
4101
4102 bool block_job_is_cancelled(BlockJob *job)
4103 {
4104 return job->cancelled;
4105 }
4106
4107 struct BlockCancelData {
4108 BlockJob *job;
4109 BlockDriverCompletionFunc *cb;
4110 void *opaque;
4111 bool cancelled;
4112 int ret;
4113 };
4114
4115 static void block_job_cancel_cb(void *opaque, int ret)
4116 {
4117 struct BlockCancelData *data = opaque;
4118
4119 data->cancelled = block_job_is_cancelled(data->job);
4120 data->ret = ret;
4121 data->cb(data->opaque, ret);
4122 }
4123
4124 int block_job_cancel_sync(BlockJob *job)
4125 {
4126 struct BlockCancelData data;
4127 BlockDriverState *bs = job->bs;
4128
4129 assert(bs->job == job);
4130
4131 /* Set up our own callback to store the result and chain to
4132 * the original callback.
4133 */
4134 data.job = job;
4135 data.cb = job->cb;
4136 data.opaque = job->opaque;
4137 data.ret = -EINPROGRESS;
4138 job->cb = block_job_cancel_cb;
4139 job->opaque = &data;
4140 block_job_cancel(job);
4141 while (data.ret == -EINPROGRESS) {
4142 qemu_aio_wait();
4143 }
4144 return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
4145 }
4146
4147 void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
4148 {
4149 /* Check cancellation *before* setting busy = false, too! */
4150 if (!block_job_is_cancelled(job)) {
4151 job->busy = false;
4152 co_sleep_ns(clock, ns);
4153 job->busy = true;
4154 }
4155 }