]> git.proxmox.com Git - mirror_qemu.git/blame - block.c
block: Allow the user to define "node-name" option both on command line and QMP.
[mirror_qemu.git] / block.c
CommitLineData
fc01f7e7
FB
1/*
2 * QEMU System Emulator block driver
5fafdf24 3 *
fc01f7e7 4 * Copyright (c) 2003 Fabrice Bellard
5fafdf24 5 *
fc01f7e7
FB
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
3990d09a 24#include "config-host.h"
faf07963 25#include "qemu-common.h"
6d519a5f 26#include "trace.h"
83c9089e 27#include "monitor/monitor.h"
737e150e
PB
28#include "block/block_int.h"
29#include "block/blockjob.h"
1de7afc9 30#include "qemu/module.h"
7b1b5d19 31#include "qapi/qmp/qjson.h"
9c17d615 32#include "sysemu/sysemu.h"
1de7afc9 33#include "qemu/notify.h"
737e150e 34#include "block/coroutine.h"
b2023818 35#include "qmp-commands.h"
1de7afc9 36#include "qemu/timer.h"
fc01f7e7 37
71e72a19 38#ifdef CONFIG_BSD
7674e7bf
FB
39#include <sys/types.h>
40#include <sys/stat.h>
41#include <sys/ioctl.h>
72cf2d4f 42#include <sys/queue.h>
c5e97233 43#ifndef __DragonFly__
7674e7bf
FB
44#include <sys/disk.h>
45#endif
c5e97233 46#endif
7674e7bf 47
49dc768d
AL
48#ifdef _WIN32
49#include <windows.h>
50#endif
51
e4654d2d
FZ
/* Tracks which parts of a BlockDriverState have been written.  Instances
 * live on the bs->dirty_bitmaps list (see bdrv_new / QLIST_INIT). */
struct BdrvDirtyBitmap {
    HBitmap *bitmap;                    /* the dirty bits themselves */
    QLIST_ENTRY(BdrvDirtyBitmap) list;  /* link in bs->dirty_bitmaps */
};
56
1c9805a3
SH
57#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
58
7d4b4ba5 59static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
f141eafe
AL
60static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
61 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
c87c0672 62 BlockDriverCompletionFunc *cb, void *opaque);
f141eafe
AL
63static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
64 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
ce1a14dc 65 BlockDriverCompletionFunc *cb, void *opaque);
f9f05dc5
KW
66static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
67 int64_t sector_num, int nb_sectors,
68 QEMUIOVector *iov);
69static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
70 int64_t sector_num, int nb_sectors,
71 QEMUIOVector *iov);
c5fbe571 72static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
470c0504
SH
73 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
74 BdrvRequestFlags flags);
1c9805a3 75static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
f08f2dda
SH
76 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
77 BdrvRequestFlags flags);
b2a61371
SH
78static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
79 int64_t sector_num,
80 QEMUIOVector *qiov,
81 int nb_sectors,
d20d9b7c 82 BdrvRequestFlags flags,
b2a61371
SH
83 BlockDriverCompletionFunc *cb,
84 void *opaque,
8c5873d6 85 bool is_write);
b2a61371 86static void coroutine_fn bdrv_co_do_rw(void *opaque);
621f0589 87static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
aa7bfbff 88 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
ec530c81 89
1b7bdbc1
SH
90static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
91 QTAILQ_HEAD_INITIALIZER(bdrv_states);
7ee930d0 92
dc364f4c
BC
93static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
94 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);
95
8a22f02a
SH
96static QLIST_HEAD(, BlockDriver) bdrv_drivers =
97 QLIST_HEAD_INITIALIZER(bdrv_drivers);
ea2384d3 98
eb852011
MA
99/* If non-zero, use only whitelisted block drivers */
100static int use_bdrv_whitelist;
101
9e0b22f4
SH
102#ifdef _WIN32
/* Return non-zero iff filename begins with a DOS drive specifier
 * such as "c:" (only the first two characters are inspected). */
static int is_windows_drive_prefix(const char *filename)
{
    char first = filename[0];
    int is_drive_letter = (first >= 'a' && first <= 'z') ||
                          (first >= 'A' && first <= 'Z');

    return is_drive_letter && filename[1] == ':';
}
109
110int is_windows_drive(const char *filename)
111{
112 if (is_windows_drive_prefix(filename) &&
113 filename[2] == '\0')
114 return 1;
115 if (strstart(filename, "\\\\.\\", NULL) ||
116 strstart(filename, "//./", NULL))
117 return 1;
118 return 0;
119}
120#endif
121
0563e191 122/* throttling disk I/O limits */
cc0681c4
BC
123void bdrv_set_io_limits(BlockDriverState *bs,
124 ThrottleConfig *cfg)
98f90dba 125{
cc0681c4 126 int i;
98f90dba 127
cc0681c4 128 throttle_config(&bs->throttle_state, cfg);
98f90dba 129
cc0681c4
BC
130 for (i = 0; i < 2; i++) {
131 qemu_co_enter_next(&bs->throttled_reqs[i]);
98f90dba 132 }
cc0681c4
BC
133}
134
135/* this function drain all the throttled IOs */
136static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
137{
138 bool drained = false;
139 bool enabled = bs->io_limits_enabled;
140 int i;
141
142 bs->io_limits_enabled = false;
143
144 for (i = 0; i < 2; i++) {
145 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
146 drained = true;
147 }
148 }
149
150 bs->io_limits_enabled = enabled;
98f90dba 151
cc0681c4 152 return drained;
98f90dba
ZYW
153}
154
cc0681c4 155void bdrv_io_limits_disable(BlockDriverState *bs)
0563e191 156{
cc0681c4 157 bs->io_limits_enabled = false;
0563e191 158
cc0681c4
BC
159 bdrv_start_throttled_reqs(bs);
160
161 throttle_destroy(&bs->throttle_state);
0563e191
ZYW
162}
163
cc0681c4 164static void bdrv_throttle_read_timer_cb(void *opaque)
0563e191 165{
cc0681c4
BC
166 BlockDriverState *bs = opaque;
167 qemu_co_enter_next(&bs->throttled_reqs[0]);
0563e191
ZYW
168}
169
cc0681c4 170static void bdrv_throttle_write_timer_cb(void *opaque)
0563e191 171{
cc0681c4
BC
172 BlockDriverState *bs = opaque;
173 qemu_co_enter_next(&bs->throttled_reqs[1]);
0563e191
ZYW
174}
175
cc0681c4
BC
/* Enable I/O throttling on bs.
 *
 * Initializes the throttle state and its two timers (one per direction)
 * on QEMU_CLOCK_VIRTUAL.  Should be called before bdrv_set_io_limits()
 * if a limit is set; must not be called while limits are already enabled
 * (asserted below). */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
187
/* Make an I/O request wait, if throttling requires it, before it is issued.
 *
 * @nb_sectors: the number of sectors of the I/O
 * @is_write:   true if the I/O is a write (selects the per-direction
 *              queue and accounting bucket)
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     int nb_sectors,
                                     bool is_write)
{
    /* must this I/O wait for the throttle timer? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or requests of this type are already queued,
     * queue this I/O too (preserves ordering behind earlier requests) */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed now, so account for it */
    throttle_account(&bs->throttle_state,
                     is_write,
                     nb_sectors * BDRV_SECTOR_SIZE);

    /* if the next request must wait, leave it queued; the timer set up
     * by throttle_schedule_timer() will wake it later */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* otherwise release the next queued request immediately */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
219
9e0b22f4
SH
/* Return non-zero iff the path starts with a "<protocol>:" prefix,
 * i.e. a ':' appears before any path separator. */
static int path_has_protocol(const char *path)
{
    size_t span;

#ifdef _WIN32
    /* drive letters and device paths are not protocol prefixes */
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    span = strcspn(path, ":/\\");
#else
    span = strcspn(path, ":/");
#endif

    return path[span] == ':';
}
237
83f64091 238int path_is_absolute(const char *path)
3b0d4f61 239{
21664424
FB
240#ifdef _WIN32
241 /* specific case for names like: "\\.\d:" */
f53f4da9 242 if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
21664424 243 return 1;
f53f4da9
PB
244 }
245 return (*path == '/' || *path == '\\');
3b9f94e1 246#else
f53f4da9 247 return (*path == '/');
3b9f94e1 248#endif
3b0d4f61
FB
249}
250
83f64091
FB
/* If filename is absolute, just copy it to dest.  Otherwise build a path
 * to it by considering it relative to base_path.  URLs ("<protocol>:...")
 * are supported as base paths. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *dir_start, *last_sep;
    int prefix_len;

    if (dest_size <= 0) {
        return;
    }

    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
        return;
    }

    /* skip an optional "<protocol>:" prefix of base_path */
    dir_start = strchr(base_path, ':');
    dir_start = dir_start ? dir_start + 1 : base_path;

    /* find the character just past the last directory separator */
    last_sep = strrchr(base_path, '/');
#ifdef _WIN32
    {
        const char *bslash = strrchr(base_path, '\\');
        if (!last_sep || bslash > last_sep) {
            last_sep = bslash;
        }
    }
#endif
    last_sep = last_sep ? last_sep + 1 : base_path;

    /* directory part ends at whichever marker lies further right */
    if (last_sep > dir_start) {
        dir_start = last_sep;
    }

    prefix_len = dir_start - base_path;
    if (prefix_len > dest_size - 1) {
        prefix_len = dest_size - 1;
    }
    memcpy(dest, base_path, prefix_len);
    dest[prefix_len] = '\0';
    pstrcat(dest, dest_size, filename);
}
294
dc5a1371
PB
/* Write the full path of bs's backing file into dest[sz].
 *
 * A backing file name that is empty or carries a protocol prefix is
 * copied verbatim; otherwise it is resolved relative to the directory
 * of the image file itself (bs->filename). */
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
303
5efa9d5a 304void bdrv_register(BlockDriver *bdrv)
ea2384d3 305{
8c5873d6
SH
306 /* Block drivers without coroutine functions need emulation */
307 if (!bdrv->bdrv_co_readv) {
f9f05dc5
KW
308 bdrv->bdrv_co_readv = bdrv_co_readv_em;
309 bdrv->bdrv_co_writev = bdrv_co_writev_em;
310
f8c35c1d
SH
311 /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if
312 * the block driver lacks aio we need to emulate that too.
313 */
f9f05dc5
KW
314 if (!bdrv->bdrv_aio_readv) {
315 /* add AIO emulation layer */
316 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
317 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
f9f05dc5 318 }
83f64091 319 }
b2e12bc6 320
8a22f02a 321 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
ea2384d3 322}
b338082b
FB
323
/* Create a new block device (by default it is empty).
 *
 * Allocates and zero-initializes a BlockDriverState, sets its device
 * name, and — when the name is non-empty — links it into the global
 * bdrv_states list.  The returned state starts with a refcount of 1;
 * release it with bdrv_unref(). */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    /* anonymous states (empty name) stay off the global device list */
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}
344
d7d512f6
PB
/* Register a notifier to be invoked when bs is closed. */
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
349
ea2384d3
FB
350BlockDriver *bdrv_find_format(const char *format_name)
351{
352 BlockDriver *drv1;
8a22f02a
SH
353 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
354 if (!strcmp(drv1->format_name, format_name)) {
ea2384d3 355 return drv1;
8a22f02a 356 }
ea2384d3
FB
357 }
358 return NULL;
359}
360
b64ec4e4 361static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
eb852011 362{
b64ec4e4
FZ
363 static const char *whitelist_rw[] = {
364 CONFIG_BDRV_RW_WHITELIST
365 };
366 static const char *whitelist_ro[] = {
367 CONFIG_BDRV_RO_WHITELIST
eb852011
MA
368 };
369 const char **p;
370
b64ec4e4 371 if (!whitelist_rw[0] && !whitelist_ro[0]) {
eb852011 372 return 1; /* no whitelist, anything goes */
b64ec4e4 373 }
eb852011 374
b64ec4e4 375 for (p = whitelist_rw; *p; p++) {
eb852011
MA
376 if (!strcmp(drv->format_name, *p)) {
377 return 1;
378 }
379 }
b64ec4e4
FZ
380 if (read_only) {
381 for (p = whitelist_ro; *p; p++) {
382 if (!strcmp(drv->format_name, *p)) {
383 return 1;
384 }
385 }
386 }
eb852011
MA
387 return 0;
388}
389
b64ec4e4
FZ
390BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
391 bool read_only)
eb852011
MA
392{
393 BlockDriver *drv = bdrv_find_format(format_name);
b64ec4e4 394 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
eb852011
MA
395}
396
5b7e1542
ZYW
/* Carries an image-creation request into coroutine context; shared
 * between bdrv_create() and bdrv_create_co_entry(). */
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;               /* owned copy, freed by bdrv_create() */
    QEMUOptionParameter *options;
    int ret;                      /* NOT_DONE until the coroutine completes */
    Error *err;
} CreateCo;
404
405static void coroutine_fn bdrv_create_co_entry(void *opaque)
406{
cc84d90f
HR
407 Error *local_err = NULL;
408 int ret;
409
5b7e1542
ZYW
410 CreateCo *cco = opaque;
411 assert(cco->drv);
412
cc84d90f
HR
413 ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
414 if (error_is_set(&local_err)) {
415 error_propagate(&cco->err, local_err);
416 }
417 cco->ret = ret;
5b7e1542
ZYW
418}
419
0e7e1989 420int bdrv_create(BlockDriver *drv, const char* filename,
cc84d90f 421 QEMUOptionParameter *options, Error **errp)
ea2384d3 422{
5b7e1542
ZYW
423 int ret;
424
425 Coroutine *co;
426 CreateCo cco = {
427 .drv = drv,
428 .filename = g_strdup(filename),
429 .options = options,
430 .ret = NOT_DONE,
cc84d90f 431 .err = NULL,
5b7e1542
ZYW
432 };
433
434 if (!drv->bdrv_create) {
cc84d90f 435 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
80168bff
LC
436 ret = -ENOTSUP;
437 goto out;
5b7e1542
ZYW
438 }
439
440 if (qemu_in_coroutine()) {
441 /* Fast-path if already in coroutine context */
442 bdrv_create_co_entry(&cco);
443 } else {
444 co = qemu_coroutine_create(bdrv_create_co_entry);
445 qemu_coroutine_enter(co, &cco);
446 while (cco.ret == NOT_DONE) {
447 qemu_aio_wait();
448 }
449 }
450
451 ret = cco.ret;
cc84d90f
HR
452 if (ret < 0) {
453 if (error_is_set(&cco.err)) {
454 error_propagate(errp, cco.err);
455 } else {
456 error_setg_errno(errp, -ret, "Could not create image");
457 }
458 }
0e7e1989 459
80168bff
LC
460out:
461 g_free(cco.filename);
5b7e1542 462 return ret;
ea2384d3
FB
463}
464
cc84d90f
HR
465int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
466 Error **errp)
84a12e66
CH
467{
468 BlockDriver *drv;
cc84d90f
HR
469 Error *local_err = NULL;
470 int ret;
84a12e66 471
98289620 472 drv = bdrv_find_protocol(filename, true);
84a12e66 473 if (drv == NULL) {
cc84d90f 474 error_setg(errp, "Could not find protocol for file '%s'", filename);
16905d71 475 return -ENOENT;
84a12e66
CH
476 }
477
cc84d90f
HR
478 ret = bdrv_create(drv, filename, options, &local_err);
479 if (error_is_set(&local_err)) {
480 error_propagate(errp, local_err);
481 }
482 return ret;
84a12e66
CH
483}
484
eba25057
JM
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    if (GetTempPath(MAX_PATH, temp_dir) &&
        GetTempFileName(temp_dir, "qem", 0, filename)) {
        return 0;
    }
    return -GetLastError();
#else
    const char *tmpdir = getenv("TMPDIR");
    int fd;

    if (!tmpdir) {
        tmpdir = "/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        /* a failed close may mean the data never hit the disk; don't
         * leave a half-created file behind */
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
fc01f7e7 519
84a12e66
CH
520/*
521 * Detect host devices. By convention, /dev/cdrom[N] is always
522 * recognized as a host CDROM.
523 */
524static BlockDriver *find_hdev_driver(const char *filename)
525{
526 int score_max = 0, score;
527 BlockDriver *drv = NULL, *d;
528
529 QLIST_FOREACH(d, &bdrv_drivers, list) {
530 if (d->bdrv_probe_device) {
531 score = d->bdrv_probe_device(filename);
532 if (score > score_max) {
533 score_max = score;
534 drv = d;
535 }
536 }
537 }
538
539 return drv;
540}
541
98289620
KW
/* Resolve the protocol driver responsible for filename.
 *
 * Host-device detection runs first (see the XXX below), then a plain
 * path (or one where protocol prefixes are disallowed) falls back to the
 * "file" driver; otherwise the "<protocol>:" prefix is matched against
 * each driver's protocol_name.  Returns NULL if nothing matches. */
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    /* copy the "<protocol>" part (truncated to the local buffer size) */
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
583
f500a6d3 584static int find_image_format(BlockDriverState *bs, const char *filename,
34b5d2c6 585 BlockDriver **pdrv, Error **errp)
f3a5d3f8 586{
f500a6d3 587 int score, score_max;
f3a5d3f8
CH
588 BlockDriver *drv1, *drv;
589 uint8_t buf[2048];
f500a6d3 590 int ret = 0;
f8ea0b00 591
08a00559 592 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
8e895599 593 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
c98ac35d
SW
594 drv = bdrv_find_format("raw");
595 if (!drv) {
34b5d2c6 596 error_setg(errp, "Could not find raw image format");
c98ac35d
SW
597 ret = -ENOENT;
598 }
599 *pdrv = drv;
600 return ret;
1a396859 601 }
f8ea0b00 602
83f64091 603 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
83f64091 604 if (ret < 0) {
34b5d2c6
HR
605 error_setg_errno(errp, -ret, "Could not read image for determining its "
606 "format");
c98ac35d
SW
607 *pdrv = NULL;
608 return ret;
83f64091
FB
609 }
610
ea2384d3 611 score_max = 0;
84a12e66 612 drv = NULL;
8a22f02a 613 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
83f64091
FB
614 if (drv1->bdrv_probe) {
615 score = drv1->bdrv_probe(buf, ret, filename);
616 if (score > score_max) {
617 score_max = score;
618 drv = drv1;
619 }
0849bf08 620 }
fc01f7e7 621 }
c98ac35d 622 if (!drv) {
34b5d2c6
HR
623 error_setg(errp, "Could not determine image format: No compatible "
624 "driver found");
c98ac35d
SW
625 ret = -ENOENT;
626 }
627 *pdrv = drv;
628 return ret;
ea2384d3
FB
629}
630
51762288
SH
/**
 * Set the current 'total_sectors' value
 *
 * Queries the driver for the device length when possible and rounds it
 * up to whole sectors; otherwise the caller-supplied hint is stored
 * unchanged.  Returns 0 on success or the negative value returned by
 * the driver's bdrv_getlength().
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
654
9e8f1835
PB
655/**
656 * Set open flags for a given discard mode
657 *
658 * Return 0 on success, -1 if the discard mode was invalid.
659 */
660int bdrv_parse_discard_flags(const char *mode, int *flags)
661{
662 *flags &= ~BDRV_O_UNMAP;
663
664 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
665 /* do nothing */
666 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
667 *flags |= BDRV_O_UNMAP;
668 } else {
669 return -1;
670 }
671
672 return 0;
673}
674
c3993cdc
SH
675/**
676 * Set open flags for a given cache mode
677 *
678 * Return 0 on success, -1 if the cache mode was invalid.
679 */
680int bdrv_parse_cache_flags(const char *mode, int *flags)
681{
682 *flags &= ~BDRV_O_CACHE_MASK;
683
684 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
685 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
92196b2f
SH
686 } else if (!strcmp(mode, "directsync")) {
687 *flags |= BDRV_O_NOCACHE;
c3993cdc
SH
688 } else if (!strcmp(mode, "writeback")) {
689 *flags |= BDRV_O_CACHE_WB;
690 } else if (!strcmp(mode, "unsafe")) {
691 *flags |= BDRV_O_CACHE_WB;
692 *flags |= BDRV_O_NO_FLUSH;
693 } else if (!strcmp(mode, "writethrough")) {
694 /* this is the default */
695 } else {
696 return -1;
697 }
698
699 return 0;
700}
701
53fec9d3
SH
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

/* Drop one copy-on-read reference; must be balanced with a prior enable
 * (asserted). */
void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
717
7b272452
KW
718static int bdrv_open_flags(BlockDriverState *bs, int flags)
719{
720 int open_flags = flags | BDRV_O_CACHE_WB;
721
722 /*
723 * Clear flags that are internal to the block layer before opening the
724 * image.
725 */
726 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
727
728 /*
729 * Snapshots should be writable.
730 */
731 if (bs->is_temporary) {
732 open_flags |= BDRV_O_RDWR;
733 }
734
735 return open_flags;
736}
737
6913c0c2
BC
738static int bdrv_assign_node_name(BlockDriverState *bs,
739 const char *node_name,
740 Error **errp)
741{
742 if (!node_name) {
743 return 0;
744 }
745
746 /* empty string node name is invalid */
747 if (node_name[0] == '\0') {
748 error_setg(errp, "Empty node name");
749 return -EINVAL;
750 }
751
752 /* takes care of avoiding duplicates node names */
753 if (bdrv_find_node(node_name)) {
754 error_setg(errp, "Duplicate node name");
755 return -EINVAL;
756 }
757
758 /* copy node name into the bs and insert it into the graph list */
759 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
760 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
761
762 return 0;
763}
764
57915332
KW
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 *
 * @bs:    the state being opened (must not yet have a file or driver)
 * @file:  already-opened protocol layer, or NULL when drv itself is a
 *         protocol driver
 * @drv:   the driver to open with (required)
 *
 * Returns 0 on success or a negative errno; on failure bs is left
 * without driver/opaque state (see free_and_fail).
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* "node-name" may come from the command line or QMP; consume it here */
    node_name = qdict_get_try_str(options, "node-name");
    ret = bdrv_assign_node_name(bs, node_name, errp);
    if (ret < 0) {
        return ret;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->buffer_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        /* distinguish "ro-only driver used rw" from "not whitelisted at all" */
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        /* prefer the driver's error message; otherwise synthesize one */
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

#ifndef _WIN32
    /* temporary images are unlinked immediately so they vanish on close */
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
889
b6ce07aa
KW
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 *
 * If @reference is given, the existing device with that name is
 * referenced instead of opening anything; filename and options must
 * then be empty.  On success *pbs receives the (new or referenced)
 * BlockDriverState.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   const char *reference, QDict *options, int flags,
                   Error **errp)
{
    BlockDriverState *bs = NULL;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (reference) {
        if (filename || qdict_size(options)) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }
        QDECREF(options);

        bs = bdrv_find(reference);
        if (!bs) {
            error_setg(errp, "Cannot find block device '%s'", reference);
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    bs = bdrv_new("");
    /* bs->options keeps the original dict; a shallow clone is consumed
     * below as individual options are processed */
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        /* protocol prefixes are only honoured for caller-given filenames */
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    if (!drv->bdrv_file_open) {
        /* format driver: bdrv_open() takes over ownership of options */
        ret = bdrv_open(bs, filename, options, flags, drv, &local_err);
        options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    /* if the driver never attached, bs->options was not consumed either */
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
1022
31ca6d07
KW
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling this function.
 *
 * Returns 0 on success (including the no-op cases: backing file already open,
 * or no backing file configured at all), negative errno on failure.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    /* Already open: nothing to do, but we still own the options reference */
    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        /* The options fully identify the backing file; no filename needed */
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        /* Neither a backing filename nor any options: no backing file */
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    /* bdrv_open() consumes the options reference, even on failure */
    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }

    /* Remember the protocol-level filename of the backing file */
    if (bs->backing_hd->file) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->backing_hd->file->filename);
    }

    return 0;
}
1089
da557aac
HR
1090/*
1091 * Opens a disk image whose options are given as BlockdevRef in another block
1092 * device's options.
1093 *
1094 * If force_raw is true, bdrv_file_open() will be used, thereby preventing any
1095 * image format auto-detection. If it is false and a filename is given,
1096 * bdrv_open() will be used for auto-detection.
1097 *
1098 * If allow_none is true, no image will be opened if filename is false and no
1099 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1100 *
1101 * bdrev_key specifies the key for the image's BlockdevRef in the options QDict.
1102 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1103 * itself, all options starting with "${bdref_key}." are considered part of the
1104 * BlockdevRef.
1105 *
1106 * The BlockdevRef will be removed from the options QDict.
1107 */
1108int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1109 QDict *options, const char *bdref_key, int flags,
1110 bool force_raw, bool allow_none, Error **errp)
1111{
1112 QDict *image_options;
1113 int ret;
1114 char *bdref_key_dot;
1115 const char *reference;
1116
1117 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1118 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1119 g_free(bdref_key_dot);
1120
1121 reference = qdict_get_try_str(options, bdref_key);
1122 if (!filename && !reference && !qdict_size(image_options)) {
1123 if (allow_none) {
1124 ret = 0;
1125 } else {
1126 error_setg(errp, "A block device must be specified for \"%s\"",
1127 bdref_key);
1128 ret = -EINVAL;
1129 }
1130 goto done;
1131 }
1132
1133 if (filename && !force_raw) {
1134 /* If a filename is given and the block driver should be detected
1135 automatically (instead of using none), use bdrv_open() in order to do
1136 that auto-detection. */
1137 BlockDriverState *bs;
1138
1139 if (reference) {
1140 error_setg(errp, "Cannot reference an existing block device while "
1141 "giving a filename");
1142 ret = -EINVAL;
1143 goto done;
1144 }
1145
1146 bs = bdrv_new("");
1147 ret = bdrv_open(bs, filename, image_options, flags, NULL, errp);
1148 if (ret < 0) {
1149 bdrv_unref(bs);
1150 } else {
1151 *pbs = bs;
1152 }
1153 } else {
1154 ret = bdrv_file_open(pbs, filename, reference, image_options, flags,
1155 errp);
1156 }
1157
1158done:
1159 qdict_del(options, bdref_key);
1160 return ret;
1161}
1162
b6ce07aa
KW
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * The original options QDict is stored in bs->options; all work below is done
 * on a shallow clone whose entries are consumed (deleted) as they are parsed,
 * so anything left over at the end is an unknown option.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        QDict *snapshot_options;

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* Get the required size from the image */
        bs1 = bdrv_new("");
        QINCREF(options);
        /* Probe open; consumes the extra options reference taken above */
        ret = bdrv_open(bs1, filename, options, BDRV_O_NO_BACKING,
                        drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        /* Create the temporary image */
        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        /* Prepare a new options QDict for the temporary file, where user
         * options refer to the backing file */
        if (filename) {
            qdict_put(options, "file.filename", qstring_from_str(filename));
        }
        if (drv) {
            qdict_put(options, "driver", qstring_from_str(drv->format_name));
        }

        snapshot_options = qdict_new();
        qdict_put(snapshot_options, "backing", options);
        qdict_flatten(snapshot_options);

        /* From here on we open the temporary overlay instead; the user's
         * options now describe its backing file */
        bs->options = snapshot_options;
        options = qdict_clone_shallow(bs->options);

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_open_flags(bs, flags | BDRV_O_UNMAP), true, true,
                          &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        if (file) {
            /* Probe the image content to detect the format */
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        /* local_err was set by find_image_format() */
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* Drop our reference unless bdrv_open_common() adopted 'file' as
     * bs->file */
    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    /* Remove the temporary snapshot overlay created above */
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* bdrv_close() releases bs->options; only the clone is left to free */
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
1360
e971aa12
JC
/* One element of a BlockReopenQueue: the staged reopen state for a single
 * BlockDriverState. */
typedef struct BlockReopenQueueEntry {
    bool prepared;    /* true once bdrv_reopen_prepare() succeeded for it */
    BDRVReopenState state;    /* new flags and driver-private staged data */
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
1366
1367/*
1368 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1369 * reopen of multiple devices.
1370 *
1371 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
1372 * already performed, or alternatively may be NULL a new BlockReopenQueue will
1373 * be created and initialized. This newly created BlockReopenQueue should be
1374 * passed back in for subsequent calls that are intended to be of the same
1375 * atomic 'set'.
1376 *
1377 * bs is the BlockDriverState to add to the reopen queue.
1378 *
1379 * flags contains the open flags for the associated bs
1380 *
1381 * returns a pointer to bs_queue, which is either the newly allocated
1382 * bs_queue, or the existing bs_queue being used.
1383 *
1384 */
1385BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1386 BlockDriverState *bs, int flags)
1387{
1388 assert(bs != NULL);
1389
1390 BlockReopenQueueEntry *bs_entry;
1391 if (bs_queue == NULL) {
1392 bs_queue = g_new0(BlockReopenQueue, 1);
1393 QSIMPLEQ_INIT(bs_queue);
1394 }
1395
1396 if (bs->file) {
1397 bdrv_reopen_queue(bs_queue, bs->file, flags);
1398 }
1399
1400 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1401 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1402
1403 bs_entry->state.bs = bs;
1404 bs_entry->state.flags = flags;
1405
1406 return bs_queue;
1407}
1408
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previous
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandonded, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 * Returns 0 on success, -1 (with errp set) if any prepare step failed.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    /* Quiesce all devices before touching their open state */
    bdrv_drain_all();

    /* Phase 1: prepare every entry; stop at the first failure */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    /* On failure, roll back every entry that had been prepared; in all
     * cases free the queue itself */
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
1461
1462
1463/* Reopen a single BlockDriverState with the specified flags. */
1464int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1465{
1466 int ret = -1;
1467 Error *local_err = NULL;
1468 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1469
1470 ret = bdrv_reopen_multiple(queue, &local_err);
1471 if (local_err != NULL) {
1472 error_propagate(errp, local_err);
1473 }
1474 return ret;
1475}
1476
1477
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }


    /* Flush pending data so the driver can stage the reopen from a clean
     * state */
    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                /* Driver failed without reporting a cause */
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
1549
1550/*
1551 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1552 * makes them final by swapping the staging BlockDriverState contents into
1553 * the active BlockDriverState contents.
1554 */
1555void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1556{
1557 BlockDriver *drv;
1558
1559 assert(reopen_state != NULL);
1560 drv = reopen_state->bs->drv;
1561 assert(drv != NULL);
1562
1563 /* If there are any driver level actions to take */
1564 if (drv->bdrv_reopen_commit) {
1565 drv->bdrv_reopen_commit(reopen_state);
1566 }
1567
1568 /* set BDS specific flags now */
1569 reopen_state->bs->open_flags = reopen_state->flags;
1570 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1571 BDRV_O_CACHE_WB);
1572 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1573}
1574
1575/*
1576 * Abort the reopen, and delete and free the staged changes in
1577 * reopen_state
1578 */
1579void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1580{
1581 BlockDriver *drv;
1582
1583 assert(reopen_state != NULL);
1584 drv = reopen_state->bs->drv;
1585 assert(drv != NULL);
1586
1587 if (drv->bdrv_reopen_abort) {
1588 drv->bdrv_reopen_abort(reopen_state);
1589 }
1590}
1591
1592
fc01f7e7
FB
/*
 * Close a BlockDriverState: cancel its job, drain and flush all I/O, run
 * the close notifiers, tear down the format driver state and reset the BDS
 * fields so it can be reopened later.  The BDS itself is not freed.
 */
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        /* Release the backing chain before closing the driver */
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        /* On POSIX the temporary file was unlinked right after creation;
         * Windows needs it removed here instead */
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        /* Reset per-open state so the BDS can be reused */
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        /* Drop the protocol layer */
        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    /* Tell the attached device model the medium is gone */
    bdrv_dev_change_media_cb(bs, false);

    /*throttling disk I/O limits*/
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
1642
2bc93fed
MK
1643void bdrv_close_all(void)
1644{
1645 BlockDriverState *bs;
1646
dc364f4c 1647 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2bc93fed
MK
1648 bdrv_close(bs);
1649 }
1650}
1651
88266f5a
SH
1652/* Check if any requests are in-flight (including throttled requests) */
1653static bool bdrv_requests_pending(BlockDriverState *bs)
1654{
1655 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1656 return true;
1657 }
cc0681c4
BC
1658 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1659 return true;
1660 }
1661 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
88266f5a
SH
1662 return true;
1663 }
1664 if (bs->file && bdrv_requests_pending(bs->file)) {
1665 return true;
1666 }
1667 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1668 return true;
1669 }
1670 return false;
1671}
1672
1673static bool bdrv_requests_pending_all(void)
1674{
1675 BlockDriverState *bs;
dc364f4c 1676 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
88266f5a
SH
1677 if (bdrv_requests_pending(bs)) {
1678 return true;
1679 }
1680 }
1681 return false;
1682}
1683
922453bc
SH
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        /* Release throttled requests so they can make progress */
        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            bdrv_start_throttled_reqs(bs);
        }

        /* Poll the event loop; block inside aio_poll() only while requests
         * are known to be pending, otherwise do a non-blocking pass */
        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
1711
dc364f4c
BC
1712/* make a BlockDriverState anonymous by removing from bdrv_state and
1713 * graph_bdrv_state list.
d22b2f41
RH
1714 Also, NULL terminate the device_name to prevent double remove */
1715void bdrv_make_anon(BlockDriverState *bs)
1716{
1717 if (bs->device_name[0] != '\0') {
dc364f4c 1718 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
d22b2f41
RH
1719 }
1720 bs->device_name[0] = '\0';
dc364f4c
BC
1721 if (bs->node_name[0] != '\0') {
1722 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1723 }
1724 bs->node_name[0] = '\0';
d22b2f41
RH
1725}
1726
e023b2e2
PB
1727static void bdrv_rebind(BlockDriverState *bs)
1728{
1729 if (bs->drv && bs->drv->bdrv_rebind) {
1730 bs->drv->bdrv_rebind(bs);
1731 }
1732}
1733
4ddc07ca
PB
/*
 * Copy the fields that must stay attached to the guest device from bs_src
 * to bs_dest.  Used by bdrv_swap() to undo the effect of the full struct
 * swap on exactly these fields.
 */
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops = bs_src->dev_ops;
    bs_dest->dev_opaque = bs_src->dev_opaque;
    bs_dest->dev = bs_src->dev;
    bs_dest->buffer_alignment = bs_src->buffer_alignment;
    bs_dest->copy_on_read = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error = bs_src->on_read_error;
    bs_dest->on_write_error = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
    bs_dest->iostatus = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt = bs_src->refcnt;

    /* job */
    bs_dest->in_use = bs_src->in_use;
    bs_dest->job = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;

    /* keep the same entry in graph_bdrv_states
     * We do want to swap name but don't want to swap linked list entries
     */
    bs_dest->node_list = bs_src->node_list;
}
8802d1fd 1785
4ddc07ca
PB
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* Swap the complete structs... */
    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap! */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* Let both drivers re-attach their private state */
    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
1833
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    /* Record the new backing file's name and format on the top layer */
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
1858
/* Destroy a BlockDriverState whose last reference is gone: close it, drop
 * it from the global lists and free the memory.  The asserts document the
 * required state: no attached device, no job, not in use, refcount zero. */
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
1874
fa879d62
MA
1875int bdrv_attach_dev(BlockDriverState *bs, void *dev)
1876/* TODO change to DeviceState *dev when all users are qdevified */
18846dee 1877{
fa879d62 1878 if (bs->dev) {
18846dee
MA
1879 return -EBUSY;
1880 }
fa879d62 1881 bs->dev = dev;
28a7282a 1882 bdrv_iostatus_reset(bs);
18846dee
MA
1883 return 0;
1884}
1885
fa879d62
MA
1886/* TODO qdevified devices don't use this, remove when devices are qdevified */
1887void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
18846dee 1888{
fa879d62
MA
1889 if (bdrv_attach_dev(bs, dev) < 0) {
1890 abort();
1891 }
1892}
1893
1894void bdrv_detach_dev(BlockDriverState *bs, void *dev)
1895/* TODO change to DeviceState *dev when all users are qdevified */
1896{
1897 assert(bs->dev == dev);
1898 bs->dev = NULL;
0e49de52
MA
1899 bs->dev_ops = NULL;
1900 bs->dev_opaque = NULL;
29e05f20 1901 bs->buffer_alignment = 512;
18846dee
MA
1902}
1903
fa879d62
MA
/* TODO change to return DeviceState * when all users are qdevified */
/* Return the guest device model set by bdrv_attach_dev(), or NULL. */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}
1909
0e49de52
MA
/* Register the device-model callback table (media change, eject request,
 * tray state, resize, medium lock) and the opaque pointer passed to it. */
void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
1916
32c81a4a
PB
1917void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
1918 enum MonitorEvent ev,
1919 BlockErrorAction action, bool is_read)
329c0a48
LC
1920{
1921 QObject *data;
1922 const char *action_str;
1923
1924 switch (action) {
1925 case BDRV_ACTION_REPORT:
1926 action_str = "report";
1927 break;
1928 case BDRV_ACTION_IGNORE:
1929 action_str = "ignore";
1930 break;
1931 case BDRV_ACTION_STOP:
1932 action_str = "stop";
1933 break;
1934 default:
1935 abort();
1936 }
1937
1938 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1939 bdrv->device_name,
1940 action_str,
1941 is_read ? "read" : "write");
32c81a4a 1942 monitor_protocol_event(ev, data);
329c0a48
LC
1943
1944 qobject_decref(data);
1945}
1946
6f382ed2
LC
1947static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
1948{
1949 QObject *data;
1950
1951 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
1952 bdrv_get_device_name(bs), ejected);
1953 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
1954
1955 qobject_decref(data);
1956}
1957
7d4b4ba5 1958static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
0e49de52 1959{
145feb17 1960 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
6f382ed2 1961 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
7d4b4ba5 1962 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
6f382ed2
LC
1963 if (tray_was_closed) {
1964 /* tray open */
1965 bdrv_emit_qmp_eject_event(bs, true);
1966 }
1967 if (load) {
1968 /* tray close */
1969 bdrv_emit_qmp_eject_event(bs, false);
1970 }
145feb17
MA
1971 }
1972}
1973
2c6942fa
MA
1974bool bdrv_dev_has_removable_media(BlockDriverState *bs)
1975{
1976 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
1977}
1978
025ccaa7
PB
1979void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
1980{
1981 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
1982 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
1983 }
1984}
1985
e4def80b
MA
1986bool bdrv_dev_is_tray_open(BlockDriverState *bs)
1987{
1988 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
1989 return bs->dev_ops->is_tray_open(bs->dev_opaque);
1990 }
1991 return false;
1992}
1993
145feb17
MA
1994static void bdrv_dev_resize_cb(BlockDriverState *bs)
1995{
1996 if (bs->dev_ops && bs->dev_ops->resize_cb) {
1997 bs->dev_ops->resize_cb(bs->dev_opaque);
0e49de52
MA
1998 }
1999}
2000
f107639a
MA
2001bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2002{
2003 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2004 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2005 }
2006 return false;
2007}
2008
e97fc193
AL
2009/*
2010 * Run consistency checks on an image
2011 *
e076f338 2012 * Returns 0 if the check could be completed (it doesn't mean that the image is
a1c7273b 2013 * free of errors) or -errno when an internal error occurred. The results of the
e076f338 2014 * check are stored in res.
e97fc193 2015 */
4534ff54 2016int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
e97fc193
AL
2017{
2018 if (bs->drv->bdrv_check == NULL) {
2019 return -ENOTSUP;
2020 }
2021
e076f338 2022 memset(res, 0, sizeof(*res));
4534ff54 2023 return bs->drv->bdrv_check(bs, res, fix);
e97fc193
AL
2024}
2025
8a426614
KW
2026#define COMMIT_BUF_SECTORS 2048
2027
33e3963e
FB
2028/* commit COW file into the raw image */
2029int bdrv_commit(BlockDriverState *bs)
2030{
19cb3738 2031 BlockDriver *drv = bs->drv;
8a426614
KW
2032 int64_t sector, total_sectors;
2033 int n, ro, open_flags;
0bce597d 2034 int ret = 0;
8a426614 2035 uint8_t *buf;
c2cba3d9 2036 char filename[PATH_MAX];
33e3963e 2037
19cb3738
FB
2038 if (!drv)
2039 return -ENOMEDIUM;
4dca4b63
NS
2040
2041 if (!bs->backing_hd) {
2042 return -ENOTSUP;
33e3963e
FB
2043 }
2044
2d3735d3
SH
2045 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
2046 return -EBUSY;
2047 }
2048
4dca4b63 2049 ro = bs->backing_hd->read_only;
c2cba3d9
JM
2050 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2051 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
4dca4b63
NS
2052 open_flags = bs->backing_hd->open_flags;
2053
2054 if (ro) {
0bce597d
JC
2055 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2056 return -EACCES;
4dca4b63 2057 }
ea2384d3 2058 }
33e3963e 2059
6ea44308 2060 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
7267c094 2061 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
8a426614
KW
2062
2063 for (sector = 0; sector < total_sectors; sector += n) {
d663640c
PB
2064 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2065 if (ret < 0) {
2066 goto ro_cleanup;
2067 }
2068 if (ret) {
8a426614
KW
2069 if (bdrv_read(bs, sector, buf, n) != 0) {
2070 ret = -EIO;
2071 goto ro_cleanup;
2072 }
2073
2074 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
2075 ret = -EIO;
2076 goto ro_cleanup;
2077 }
ea2384d3 2078 }
33e3963e 2079 }
95389c86 2080
1d44952f
CH
2081 if (drv->bdrv_make_empty) {
2082 ret = drv->bdrv_make_empty(bs);
2083 bdrv_flush(bs);
2084 }
95389c86 2085
3f5075ae
CH
2086 /*
2087 * Make sure all data we wrote to the backing device is actually
2088 * stable on disk.
2089 */
2090 if (bs->backing_hd)
2091 bdrv_flush(bs->backing_hd);
4dca4b63
NS
2092
2093ro_cleanup:
7267c094 2094 g_free(buf);
4dca4b63
NS
2095
2096 if (ro) {
0bce597d
JC
2097 /* ignoring error return here */
2098 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
4dca4b63
NS
2099 }
2100
1d44952f 2101 return ret;
33e3963e
FB
2102}
2103
e8877497 2104int bdrv_commit_all(void)
6ab4b5ab
MA
2105{
2106 BlockDriverState *bs;
2107
dc364f4c 2108 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
272d2d8e
JC
2109 if (bs->drv && bs->backing_hd) {
2110 int ret = bdrv_commit(bs);
2111 if (ret < 0) {
2112 return ret;
2113 }
e8877497 2114 }
6ab4b5ab 2115 }
e8877497 2116 return 0;
6ab4b5ab
MA
2117}
2118
dbffbdcf
SH
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    /* Wake every coroutine parked in wait_for_overlapping_requests()
     * on this request. */
    qemu_co_queue_restart_all(&req->wait_queue);
}
2129
/**
 * Add an active request to the tracked requests list
 *
 * Initializes *req (caller-allocated, typically on the coroutine stack)
 * and links it into bs->tracked_requests so that concurrent requests can
 * detect overlaps and wait on it.
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        /* Recorded so waiters can assert against reentrant self-waits */
        .co = qemu_coroutine_self(),
    };

    /* Coroutines blocking on this request queue themselves here */
    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
2150
d83947ac
SH
/**
 * Round a region to cluster boundaries
 *
 * Expands [sector_num, sector_num + nb_sectors) outward to the enclosing
 * cluster-aligned range and stores the result in *cluster_sector_num and
 * *cluster_nb_sectors.  If the driver does not report a cluster size, the
 * range is passed through unchanged.
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        /* Cluster size unknown: no rounding possible */
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        /* c = cluster size in sectors */
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
2171
f4658285
SH
2172static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2173 int64_t sector_num, int nb_sectors) {
d83947ac
SH
2174 /* aaaa bbbb */
2175 if (sector_num >= req->sector_num + req->nb_sectors) {
2176 return false;
2177 }
2178 /* bbbb aaaa */
2179 if (req->sector_num >= sector_num + nb_sectors) {
2180 return false;
2181 }
2182 return true;
f4658285
SH
2183}
2184
/**
 * Block the calling coroutine until no tracked request overlaps the given
 * range, rounded out to cluster boundaries.  Must run in coroutine context.
 */
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* The list may change while we sleep, so restart the scan
                 * from the head after each wakeup. */
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
2220
756e6736
KW
2221/*
2222 * Return values:
2223 * 0 - success
2224 * -EINVAL - backing format specified, but no file
2225 * -ENOSPC - can't update the backing file because no space is left in the
2226 * image file header
2227 * -ENOTSUP - format driver doesn't support changing the backing file
2228 */
2229int bdrv_change_backing_file(BlockDriverState *bs,
2230 const char *backing_file, const char *backing_fmt)
2231{
2232 BlockDriver *drv = bs->drv;
469ef350 2233 int ret;
756e6736 2234
5f377794
PB
2235 /* Backing file format doesn't make sense without a backing file */
2236 if (backing_fmt && !backing_file) {
2237 return -EINVAL;
2238 }
2239
756e6736 2240 if (drv->bdrv_change_backing_file != NULL) {
469ef350 2241 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
756e6736 2242 } else {
469ef350 2243 ret = -ENOTSUP;
756e6736 2244 }
469ef350
PB
2245
2246 if (ret == 0) {
2247 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2248 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2249 }
2250 return ret;
756e6736
KW
2251}
2252
6ebdcee2
JC
2253/*
2254 * Finds the image layer in the chain that has 'bs' as its backing file.
2255 *
2256 * active is the current topmost image.
2257 *
2258 * Returns NULL if bs is not found in active's image chain,
2259 * or if active == bs.
2260 */
2261BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2262 BlockDriverState *bs)
2263{
2264 BlockDriverState *overlay = NULL;
2265 BlockDriverState *intermediate;
2266
2267 assert(active != NULL);
2268 assert(bs != NULL);
2269
2270 /* if bs is the same as active, then by definition it has no overlay
2271 */
2272 if (active == bs) {
2273 return NULL;
2274 }
2275
2276 intermediate = active;
2277 while (intermediate->backing_hd) {
2278 if (intermediate->backing_hd == bs) {
2279 overlay = intermediate;
2280 break;
2281 }
2282 intermediate = intermediate->backing_hd;
2283 }
2284
2285 return overlay;
2286}
2287
/* Queue node used by bdrv_drop_intermediate() to remember the BDSes that
 * must be unlinked and unref'd once the chain has been relinked. */
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
2292
2293
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;    /* default result until every step has succeeded */

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;


    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    /* Free the bookkeeping nodes on both the success and the error path */
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
2394
2395
71d0770c
AL
2396static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2397 size_t size)
2398{
2399 int64_t len;
2400
2401 if (!bdrv_is_inserted(bs))
2402 return -ENOMEDIUM;
2403
2404 if (bs->growable)
2405 return 0;
2406
2407 len = bdrv_getlength(bs);
2408
fbb7b4e0
KW
2409 if (offset < 0)
2410 return -EIO;
2411
2412 if ((offset > len) || (len - offset < size))
71d0770c
AL
2413 return -EIO;
2414
2415 return 0;
2416}
2417
2418static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2419 int nb_sectors)
2420{
eb5a3165
JS
2421 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2422 nb_sectors * BDRV_SECTOR_SIZE);
71d0770c
AL
2423}
2424
1c9805a3
SH
/* Request state shared between bdrv_rwv_co() and the bdrv_rw_co_entry()
 * coroutine that executes it. */
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;          /* true -> bdrv_co_do_writev, false -> readv */
    int ret;                /* completion status; NOT_DONE while in flight */
    BdrvRequestFlags flags;
} RwCo;
2434
2435static void coroutine_fn bdrv_rw_co_entry(void *opaque)
fc01f7e7 2436{
1c9805a3 2437 RwCo *rwco = opaque;
ea2384d3 2438
1c9805a3
SH
2439 if (!rwco->is_write) {
2440 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
4105eaaa
PL
2441 rwco->nb_sectors, rwco->qiov,
2442 rwco->flags);
1c9805a3
SH
2443 } else {
2444 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
4105eaaa
PL
2445 rwco->nb_sectors, rwco->qiov,
2446 rwco->flags);
1c9805a3
SH
2447 }
2448}
e7a8a783 2449
/*
 * Process a vectored synchronous request using coroutines
 *
 * qiov->size must be a multiple of BDRV_SECTOR_SIZE.  Returns the request
 * result (>= 0 on success, -errno on failure).
 */
static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num,
                       QEMUIOVector *qiov, bool is_write,
                       BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = qiov->size >> BDRV_SECTOR_BITS,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,    /* sentinel polled below until completion */
        .flags = flags,
    };
    assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        /* Spawn a coroutine and pump the event loop until it finishes */
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
b338082b 2492
8d3b1a2d
KW
2493/*
2494 * Process a synchronous request using coroutines
2495 */
2496static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
4105eaaa 2497 int nb_sectors, bool is_write, BdrvRequestFlags flags)
8d3b1a2d
KW
2498{
2499 QEMUIOVector qiov;
2500 struct iovec iov = {
2501 .iov_base = (void *)buf,
2502 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2503 };
2504
2505 qemu_iovec_init_external(&qiov, &iov, 1);
4105eaaa 2506 return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags);
8d3b1a2d
KW
2507}
2508
1c9805a3
SH
/* Synchronous sector read.
 * return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
2515
07d27a44
MA
2516/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2517int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2518 uint8_t *buf, int nb_sectors)
2519{
2520 bool enabled;
2521 int ret;
2522
2523 enabled = bs->io_limits_enabled;
2524 bs->io_limits_enabled = false;
4e7395e8 2525 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
07d27a44
MA
2526 bs->io_limits_enabled = enabled;
2527 return ret;
2528}
2529
/* Synchronous sector write.
   Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
2541
8d3b1a2d
KW
/* Synchronous vectored write; see bdrv_write() for the return codes. */
int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov)
{
    return bdrv_rwv_co(bs, sector_num, qiov, true, 0);
}
2546
aa7bfbff
PL
/* Synchronously write zeroes to a sector range; flags are passed through
 * in addition to BDRV_REQ_ZERO_WRITE.  See bdrv_write() for return codes. */
int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
2553
d75cbb5e
PL
2554/*
2555 * Completely zero out a block device with the help of bdrv_write_zeroes.
2556 * The operation is sped up by checking the block status and only writing
2557 * zeroes to the device if they currently do not return zeroes. Optional
2558 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2559 *
2560 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2561 */
2562int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2563{
2564 int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
2565 int64_t ret, nb_sectors, sector_num = 0;
2566 int n;
2567
2568 for (;;) {
2569 nb_sectors = target_size - sector_num;
2570 if (nb_sectors <= 0) {
2571 return 0;
2572 }
2573 if (nb_sectors > INT_MAX) {
2574 nb_sectors = INT_MAX;
2575 }
2576 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
3d94ce60
PL
2577 if (ret < 0) {
2578 error_report("error getting block status at sector %" PRId64 ": %s",
2579 sector_num, strerror(-ret));
2580 return ret;
2581 }
d75cbb5e
PL
2582 if (ret & BDRV_BLOCK_ZERO) {
2583 sector_num += n;
2584 continue;
2585 }
2586 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2587 if (ret < 0) {
2588 error_report("error writing zeroes at sector %" PRId64 ": %s",
2589 sector_num, strerror(-ret));
2590 return ret;
2591 }
2592 sector_num += n;
2593 }
2594}
2595
eda578e5
AL
/* Synchronous byte-granularity read: handles the unaligned head and tail
 * through a one-sector bounce buffer and reads the aligned middle directly
 * into buf.  Returns count1 on success or a negative bdrv_read() error. */
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        /* bounce the partial leading sector */
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;     /* void* arithmetic: GNU C extension used by QEMU */
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        /* bounce the partial trailing sector */
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
2640
/* Synchronous byte-granularity vectored write: read-modify-writes the
 * unaligned head and tail sectors through a bounce buffer and writes the
 * aligned middle directly from qiov.  Returns qiov->size on success or a
 * negative bdrv_read()/bdrv_write() error. */
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = qiov->size;

    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        /* read-modify-write the partial leading sector */
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)),
                          len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return qiov->size;
        sector_num++;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        QEMUIOVector qiov_inplace;

        /* len is still the number of head bytes already consumed from qiov,
         * so it doubles as the concat offset here */
        qemu_iovec_init(&qiov_inplace, qiov->niov);
        qemu_iovec_concat(&qiov_inplace, qiov, len,
                          nb_sectors << BDRV_SECTOR_BITS);
        ret = bdrv_writev(bs, sector_num, &qiov_inplace);
        qemu_iovec_destroy(&qiov_inplace);
        if (ret < 0) {
            return ret;
        }

        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        /* read-modify-write the partial trailing sector */
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return qiov->size;
}
2697
2698int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2699 const void *buf, int count1)
2700{
2701 QEMUIOVector qiov;
2702 struct iovec iov = {
2703 .iov_base = (void *) buf,
2704 .iov_len = count1,
2705 };
2706
2707 qemu_iovec_init_external(&qiov, &iov, 1);
2708 return bdrv_pwritev(bs, offset, &qiov);
83f64091 2709}
83f64091 2710
f08145fe
KW
2711/*
2712 * Writes to the file and ensures that no writes are reordered across this
2713 * request (acts as a barrier)
2714 *
2715 * Returns 0 on success, -errno in error cases.
2716 */
2717int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2718 const void *buf, int count)
2719{
2720 int ret;
2721
2722 ret = bdrv_pwrite(bs, offset, buf, count);
2723 if (ret < 0) {
2724 return ret;
2725 }
2726
f05fa4ad
PB
2727 /* No flush needed for cache modes that already do it */
2728 if (bs->enable_write_cache) {
f08145fe
KW
2729 bdrv_flush(bs);
2730 }
2731
2732 return 0;
2733}
2734
/* Copy-on-read helper: read a whole cluster through a bounce buffer and
 * write it back into the image so later reads hit the local file. */
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    /* Read the full cluster into the bounce buffer */
    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* All-zero cluster: store it via the efficient zero-write path */
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    /* Hand the caller only the sub-range it actually asked for */
    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
2800
c5fbe571
SH
/*
 * Handle a read request in coroutine context
 *
 * Applies copy-on-read, request serialization, I/O throttling and
 * zero-beyond-EOF semantics before dispatching to the driver.
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* A device-wide copy_on_read setting forces the flag on every request */
    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    /* While any CoR request is in flight, reads must serialize against
     * overlapping requests (CoR read+write must look atomic) */
    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, false);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        /* Any unallocated part forces the copy-on-read slow path */
        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = MAX(0, total_sectors - sector_num);
        if (max_nb_sectors > 0) {
            /* Read only the part that exists on disk */
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                            BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}
2890
/* Public coroutine read entry point: trace and forward with no flags. */
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}
2898
/* Coroutine read that explicitly requests copy-on-read behaviour. */
int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
2907
c31cb707
PL
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

/* Write zeroes to a sector range, chunked to the driver's alignment and
 * size limits; falls back to writing a zeroed bounce buffer when the
 * driver has no efficient zero-write operation. */
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    /* iov.iov_base doubles as the lazily-allocated, reusable bounce buffer */
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
2982
c5fbe571
SH
/*
 * Handle a write request in coroutine context
 *
 * Applies serialization against copy-on-read, I/O throttling, write
 * notifiers, zero-write dispatch, writethrough flushing and dirty-bitmap
 * bookkeeping around the driver call.
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* Writes must not interleave with an in-flight copy-on-read cluster */
    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, true);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    /* Writethrough semantics: flush after each successful write when the
     * write cache is disabled */
    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    /* Track the highest sector ever written */
    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    /* Growable devices may have been extended by this write */
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    tracked_request_end(&req);

    return ret;
}
3042
c5fbe571
SH
3043int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3044 int nb_sectors, QEMUIOVector *qiov)
3045{
3046 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3047
f08f2dda
SH
3048 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3049}
3050
3051int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
aa7bfbff
PL
3052 int64_t sector_num, int nb_sectors,
3053 BdrvRequestFlags flags)
f08f2dda 3054{
94d6ff21 3055 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
f08f2dda 3056
d32f35cb
PL
3057 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3058 flags &= ~BDRV_REQ_MAY_UNMAP;
3059 }
3060
f08f2dda 3061 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
aa7bfbff 3062 BDRV_REQ_ZERO_WRITE | flags);
c5fbe571
SH
3063}
3064
83f64091
FB
3065/**
3066 * Truncate file to 'offset' bytes (needed only for file protocols)
3067 */
3068int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3069{
3070 BlockDriver *drv = bs->drv;
51762288 3071 int ret;
83f64091 3072 if (!drv)
19cb3738 3073 return -ENOMEDIUM;
83f64091
FB
3074 if (!drv->bdrv_truncate)
3075 return -ENOTSUP;
59f2689d
NS
3076 if (bs->read_only)
3077 return -EACCES;
8591675f
MT
3078 if (bdrv_in_use(bs))
3079 return -EBUSY;
51762288
SH
3080 ret = drv->bdrv_truncate(bs, offset);
3081 if (ret == 0) {
3082 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
145feb17 3083 bdrv_dev_resize_cb(bs);
51762288
SH
3084 }
3085 return ret;
83f64091
FB
3086}
3087
4a1d5e1f
FZ
3088/**
3089 * Length of a allocated file in bytes. Sparse files are counted by actual
3090 * allocated space. Return < 0 if error or unknown.
3091 */
3092int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3093{
3094 BlockDriver *drv = bs->drv;
3095 if (!drv) {
3096 return -ENOMEDIUM;
3097 }
3098 if (drv->bdrv_get_allocated_file_size) {
3099 return drv->bdrv_get_allocated_file_size(bs);
3100 }
3101 if (bs->file) {
3102 return bdrv_get_allocated_file_size(bs->file);
3103 }
3104 return -ENOTSUP;
3105}
3106
83f64091
FB
3107/**
3108 * Length of a file in bytes. Return < 0 if error or unknown.
3109 */
3110int64_t bdrv_getlength(BlockDriverState *bs)
3111{
3112 BlockDriver *drv = bs->drv;
3113 if (!drv)
19cb3738 3114 return -ENOMEDIUM;
51762288 3115
b94a2610
KW
3116 if (drv->has_variable_length) {
3117 int ret = refresh_total_sectors(bs, bs->total_sectors);
3118 if (ret < 0) {
3119 return ret;
46a4e4e6 3120 }
83f64091 3121 }
46a4e4e6 3122 return bs->total_sectors * BDRV_SECTOR_SIZE;
fc01f7e7
FB
3123}
3124
19cb3738 3125/* return 0 as number of sectors if no device present or error */
96b8f136 3126void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
fc01f7e7 3127{
19cb3738
FB
3128 int64_t length;
3129 length = bdrv_getlength(bs);
3130 if (length < 0)
3131 length = 0;
3132 else
6ea44308 3133 length = length >> BDRV_SECTOR_BITS;
19cb3738 3134 *nb_sectors_ptr = length;
fc01f7e7 3135}
cf98951b 3136
ff06f5f3
PB
3137void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3138 BlockdevOnError on_write_error)
abd7f68d
MA
3139{
3140 bs->on_read_error = on_read_error;
3141 bs->on_write_error = on_write_error;
3142}
3143
1ceee0d5 3144BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
abd7f68d
MA
3145{
3146 return is_read ? bs->on_read_error : bs->on_write_error;
3147}
3148
3e1caa5f
PB
3149BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3150{
3151 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3152
3153 switch (on_err) {
3154 case BLOCKDEV_ON_ERROR_ENOSPC:
3155 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3156 case BLOCKDEV_ON_ERROR_STOP:
3157 return BDRV_ACTION_STOP;
3158 case BLOCKDEV_ON_ERROR_REPORT:
3159 return BDRV_ACTION_REPORT;
3160 case BLOCKDEV_ON_ERROR_IGNORE:
3161 return BDRV_ACTION_IGNORE;
3162 default:
3163 abort();
3164 }
3165}
3166
3167/* This is done by device models because, while the block layer knows
3168 * about the error, it does not know whether an operation comes from
3169 * the device or the block layer (from a job, for example).
3170 */
3171void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3172 bool is_read, int error)
3173{
3174 assert(error >= 0);
32c81a4a 3175 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3e1caa5f
PB
3176 if (action == BDRV_ACTION_STOP) {
3177 vm_stop(RUN_STATE_IO_ERROR);
3178 bdrv_iostatus_set_err(bs, error);
3179 }
3180}
3181
b338082b
FB
3182int bdrv_is_read_only(BlockDriverState *bs)
3183{
3184 return bs->read_only;
3185}
3186
985a03b0
TS
3187int bdrv_is_sg(BlockDriverState *bs)
3188{
3189 return bs->sg;
3190}
3191
e900a7b7
CH
3192int bdrv_enable_write_cache(BlockDriverState *bs)
3193{
3194 return bs->enable_write_cache;
3195}
3196
425b0148
PB
3197void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3198{
3199 bs->enable_write_cache = wce;
55b110f2
JC
3200
3201 /* so a reopen() will preserve wce */
3202 if (wce) {
3203 bs->open_flags |= BDRV_O_CACHE_WB;
3204 } else {
3205 bs->open_flags &= ~BDRV_O_CACHE_WB;
3206 }
425b0148
PB
3207}
3208
ea2384d3
FB
3209int bdrv_is_encrypted(BlockDriverState *bs)
3210{
3211 if (bs->backing_hd && bs->backing_hd->encrypted)
3212 return 1;
3213 return bs->encrypted;
3214}
3215
c0f4ce77
AL
3216int bdrv_key_required(BlockDriverState *bs)
3217{
3218 BlockDriverState *backing_hd = bs->backing_hd;
3219
3220 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3221 return 1;
3222 return (bs->encrypted && !bs->valid_key);
3223}
3224
ea2384d3
FB
3225int bdrv_set_key(BlockDriverState *bs, const char *key)
3226{
3227 int ret;
3228 if (bs->backing_hd && bs->backing_hd->encrypted) {
3229 ret = bdrv_set_key(bs->backing_hd, key);
3230 if (ret < 0)
3231 return ret;
3232 if (!bs->encrypted)
3233 return 0;
3234 }
fd04a2ae
SH
3235 if (!bs->encrypted) {
3236 return -EINVAL;
3237 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3238 return -ENOMEDIUM;
3239 }
c0f4ce77 3240 ret = bs->drv->bdrv_set_key(bs, key);
bb5fc20f
AL
3241 if (ret < 0) {
3242 bs->valid_key = 0;
3243 } else if (!bs->valid_key) {
3244 bs->valid_key = 1;
3245 /* call the change callback now, we skipped it on open */
7d4b4ba5 3246 bdrv_dev_change_media_cb(bs, true);
bb5fc20f 3247 }
c0f4ce77 3248 return ret;
ea2384d3
FB
3249}
3250
f8d6bba1 3251const char *bdrv_get_format_name(BlockDriverState *bs)
ea2384d3 3252{
f8d6bba1 3253 return bs->drv ? bs->drv->format_name : NULL;
ea2384d3
FB
3254}
3255
5fafdf24 3256void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
ea2384d3
FB
3257 void *opaque)
3258{
3259 BlockDriver *drv;
3260
8a22f02a 3261 QLIST_FOREACH(drv, &bdrv_drivers, list) {
ea2384d3
FB
3262 it(opaque, drv->format_name);
3263 }
3264}
3265
dc364f4c 3266/* This function is to find block backend bs */
b338082b
FB
3267BlockDriverState *bdrv_find(const char *name)
3268{
3269 BlockDriverState *bs;
3270
dc364f4c 3271 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1b7bdbc1 3272 if (!strcmp(name, bs->device_name)) {
b338082b 3273 return bs;
1b7bdbc1 3274 }
b338082b
FB
3275 }
3276 return NULL;
3277}
3278
dc364f4c
BC
3279/* This function is to find a node in the bs graph */
3280BlockDriverState *bdrv_find_node(const char *node_name)
3281{
3282 BlockDriverState *bs;
3283
3284 assert(node_name);
3285
3286 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3287 if (!strcmp(node_name, bs->node_name)) {
3288 return bs;
3289 }
3290 }
3291 return NULL;
3292}
3293
2f399b0a
MA
3294BlockDriverState *bdrv_next(BlockDriverState *bs)
3295{
3296 if (!bs) {
3297 return QTAILQ_FIRST(&bdrv_states);
3298 }
dc364f4c 3299 return QTAILQ_NEXT(bs, device_list);
2f399b0a
MA
3300}
3301
51de9760 3302void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
81d0912d
FB
3303{
3304 BlockDriverState *bs;
3305
dc364f4c 3306 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
51de9760 3307 it(opaque, bs);
81d0912d
FB
3308 }
3309}
3310
ea2384d3
FB
3311const char *bdrv_get_device_name(BlockDriverState *bs)
3312{
3313 return bs->device_name;
3314}
3315
c8433287
MA
3316int bdrv_get_flags(BlockDriverState *bs)
3317{
3318 return bs->open_flags;
3319}
3320
f0f0fdfe 3321int bdrv_flush_all(void)
c6ca28d6
AL
3322{
3323 BlockDriverState *bs;
f0f0fdfe 3324 int result = 0;
c6ca28d6 3325
dc364f4c 3326 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
f0f0fdfe
KW
3327 int ret = bdrv_flush(bs);
3328 if (ret < 0 && !result) {
3329 result = ret;
3330 }
1b7bdbc1 3331 }
f0f0fdfe
KW
3332
3333 return result;
c6ca28d6
AL
3334}
3335
3ac21627
PL
3336int bdrv_has_zero_init_1(BlockDriverState *bs)
3337{
3338 return 1;
3339}
3340
f2feebbd
KW
3341int bdrv_has_zero_init(BlockDriverState *bs)
3342{
3343 assert(bs->drv);
3344
11212d8f
PB
3345 /* If BS is a copy on write image, it is initialized to
3346 the contents of the base image, which may not be zeroes. */
3347 if (bs->backing_hd) {
3348 return 0;
3349 }
336c1c12
KW
3350 if (bs->drv->bdrv_has_zero_init) {
3351 return bs->drv->bdrv_has_zero_init(bs);
f2feebbd
KW
3352 }
3353
3ac21627
PL
3354 /* safe default */
3355 return 0;
f2feebbd
KW
3356}
3357
4ce78691
PL
3358bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3359{
3360 BlockDriverInfo bdi;
3361
3362 if (bs->backing_hd) {
3363 return false;
3364 }
3365
3366 if (bdrv_get_info(bs, &bdi) == 0) {
3367 return bdi.unallocated_blocks_are_zero;
3368 }
3369
3370 return false;
3371}
3372
3373bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3374{
3375 BlockDriverInfo bdi;
3376
3377 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3378 return false;
3379 }
3380
3381 if (bdrv_get_info(bs, &bdi) == 0) {
3382 return bdi.can_write_zeroes_with_unmap;
3383 }
3384
3385 return false;
3386}
3387
/* State shared between bdrv_get_block_status() and its coroutine entry. */
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;      /* image being queried */
    BlockDriverState *base;    /* lower bound of the chain walk, may be NULL */
    int64_t sector_num;        /* first sector of the query */
    int nb_sectors;            /* maximum number of sectors to examine */
    int *pnum;                 /* out: sectors in the same state */
    int64_t ret;               /* out: BDRV_BLOCK_* flags or negative errno */
    bool done;                 /* set when the coroutine has finished */
} BdrvCoGetBlockStatusData;
376ae3f1 3397
f58c7b35
TS
3398/*
3399 * Returns true iff the specified sector is present in the disk image. Drivers
3400 * not implementing the functionality are assumed to not support backing files,
3401 * hence all their sectors are reported as allocated.
3402 *
bd9533e3
SH
3403 * If 'sector_num' is beyond the end of the disk image the return value is 0
3404 * and 'pnum' is set to 0.
3405 *
f58c7b35
TS
3406 * 'pnum' is set to the number of sectors (including and immediately following
3407 * the specified sector) that are known to be in the same
3408 * allocated/unallocated state.
3409 *
bd9533e3
SH
3410 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3411 * beyond the end of the disk image it will be clamped.
f58c7b35 3412 */
b6b8a333
PB
3413static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3414 int64_t sector_num,
3415 int nb_sectors, int *pnum)
f58c7b35 3416{
617ccb46 3417 int64_t length;
bd9533e3 3418 int64_t n;
5daa74a6 3419 int64_t ret, ret2;
bd9533e3 3420
617ccb46
PB
3421 length = bdrv_getlength(bs);
3422 if (length < 0) {
3423 return length;
3424 }
3425
3426 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
bd9533e3
SH
3427 *pnum = 0;
3428 return 0;
3429 }
3430
3431 n = bs->total_sectors - sector_num;
3432 if (n < nb_sectors) {
3433 nb_sectors = n;
3434 }
3435
b6b8a333 3436 if (!bs->drv->bdrv_co_get_block_status) {
bd9533e3 3437 *pnum = nb_sectors;
918e92d7
PB
3438 ret = BDRV_BLOCK_DATA;
3439 if (bs->drv->protocol_name) {
3440 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3441 }
3442 return ret;
f58c7b35 3443 }
6aebab14 3444
415b5b01
PB
3445 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3446 if (ret < 0) {
3e0a233d 3447 *pnum = 0;
415b5b01
PB
3448 return ret;
3449 }
3450
92bc50a5
PL
3451 if (ret & BDRV_BLOCK_RAW) {
3452 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3453 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3454 *pnum, pnum);
3455 }
3456
c3d86884
PL
3457 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3458 if (bdrv_unallocated_blocks_are_zero(bs)) {
f0ad5712 3459 ret |= BDRV_BLOCK_ZERO;
1f9db224 3460 } else if (bs->backing_hd) {
f0ad5712
PB
3461 BlockDriverState *bs2 = bs->backing_hd;
3462 int64_t length2 = bdrv_getlength(bs2);
3463 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3464 ret |= BDRV_BLOCK_ZERO;
3465 }
3466 }
415b5b01 3467 }
5daa74a6
PB
3468
3469 if (bs->file &&
3470 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3471 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3472 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3473 *pnum, pnum);
3474 if (ret2 >= 0) {
3475 /* Ignore errors. This is just providing extra information, it
3476 * is useful but not necessary.
3477 */
3478 ret |= (ret2 & BDRV_BLOCK_ZERO);
3479 }
3480 }
3481
415b5b01 3482 return ret;
060f51c9
SH
3483}
3484
b6b8a333
PB
3485/* Coroutine wrapper for bdrv_get_block_status() */
3486static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
060f51c9 3487{
b6b8a333 3488 BdrvCoGetBlockStatusData *data = opaque;
060f51c9
SH
3489 BlockDriverState *bs = data->bs;
3490
b6b8a333
PB
3491 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
3492 data->pnum);
060f51c9
SH
3493 data->done = true;
3494}
3495
3496/*
b6b8a333 3497 * Synchronous wrapper around bdrv_co_get_block_status().
060f51c9 3498 *
b6b8a333 3499 * See bdrv_co_get_block_status() for details.
060f51c9 3500 */
b6b8a333
PB
3501int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
3502 int nb_sectors, int *pnum)
060f51c9 3503{
6aebab14 3504 Coroutine *co;
b6b8a333 3505 BdrvCoGetBlockStatusData data = {
6aebab14
SH
3506 .bs = bs,
3507 .sector_num = sector_num,
3508 .nb_sectors = nb_sectors,
3509 .pnum = pnum,
3510 .done = false,
3511 };
3512
bdad13b9
PB
3513 if (qemu_in_coroutine()) {
3514 /* Fast-path if already in coroutine context */
b6b8a333 3515 bdrv_get_block_status_co_entry(&data);
bdad13b9 3516 } else {
b6b8a333 3517 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
bdad13b9
PB
3518 qemu_coroutine_enter(co, &data);
3519 while (!data.done) {
3520 qemu_aio_wait();
3521 }
6aebab14
SH
3522 }
3523 return data.ret;
f58c7b35
TS
3524}
3525
b6b8a333
PB
3526int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
3527 int nb_sectors, int *pnum)
3528{
4333bb71
PB
3529 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
3530 if (ret < 0) {
3531 return ret;
3532 }
3533 return
3534 (ret & BDRV_BLOCK_DATA) ||
3535 ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
b6b8a333
PB
3536}
3537
188a7bbf
PB
3538/*
3539 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
3540 *
3541 * Return true if the given sector is allocated in any image between
3542 * BASE and TOP (inclusive). BASE can be NULL to check if the given
3543 * sector is allocated in any image of the chain. Return false otherwise.
3544 *
3545 * 'pnum' is set to the number of sectors (including and immediately following
3546 * the specified sector) that are known to be in the same
3547 * allocated/unallocated state.
3548 *
3549 */
4f578637
PB
3550int bdrv_is_allocated_above(BlockDriverState *top,
3551 BlockDriverState *base,
3552 int64_t sector_num,
3553 int nb_sectors, int *pnum)
188a7bbf
PB
3554{
3555 BlockDriverState *intermediate;
3556 int ret, n = nb_sectors;
3557
3558 intermediate = top;
3559 while (intermediate && intermediate != base) {
3560 int pnum_inter;
bdad13b9
PB
3561 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
3562 &pnum_inter);
188a7bbf
PB
3563 if (ret < 0) {
3564 return ret;
3565 } else if (ret) {
3566 *pnum = pnum_inter;
3567 return 1;
3568 }
3569
3570 /*
3571 * [sector_num, nb_sectors] is unallocated on top but intermediate
3572 * might have
3573 *
3574 * [sector_num+x, nr_sectors] allocated.
3575 */
63ba17d3
VI
3576 if (n > pnum_inter &&
3577 (intermediate == top ||
3578 sector_num + pnum_inter < intermediate->total_sectors)) {
188a7bbf
PB
3579 n = pnum_inter;
3580 }
3581
3582 intermediate = intermediate->backing_hd;
3583 }
3584
3585 *pnum = n;
3586 return 0;
3587}
3588
045df330
AL
3589const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
3590{
3591 if (bs->backing_hd && bs->backing_hd->encrypted)
3592 return bs->backing_file;
3593 else if (bs->encrypted)
3594 return bs->filename;
3595 else
3596 return NULL;
3597}
3598
5fafdf24 3599void bdrv_get_backing_filename(BlockDriverState *bs,
83f64091
FB
3600 char *filename, int filename_size)
3601{
3574c608 3602 pstrcpy(filename, filename_size, bs->backing_file);
83f64091
FB
3603}
3604
5fafdf24 3605int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
faea38e7
FB
3606 const uint8_t *buf, int nb_sectors)
3607{
3608 BlockDriver *drv = bs->drv;
3609 if (!drv)
19cb3738 3610 return -ENOMEDIUM;
faea38e7
FB
3611 if (!drv->bdrv_write_compressed)
3612 return -ENOTSUP;
fbb7b4e0
KW
3613 if (bdrv_check_request(bs, sector_num, nb_sectors))
3614 return -EIO;
a55eb92c 3615
e4654d2d 3616 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
a55eb92c 3617
faea38e7
FB
3618 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
3619}
3b46e624 3620
faea38e7
FB
3621int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
3622{
3623 BlockDriver *drv = bs->drv;
3624 if (!drv)
19cb3738 3625 return -ENOMEDIUM;
faea38e7
FB
3626 if (!drv->bdrv_get_info)
3627 return -ENOTSUP;
3628 memset(bdi, 0, sizeof(*bdi));
3629 return drv->bdrv_get_info(bs, bdi);
3630}
3631
eae041fe
HR
3632ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
3633{
3634 BlockDriver *drv = bs->drv;
3635 if (drv && drv->bdrv_get_specific_info) {
3636 return drv->bdrv_get_specific_info(bs);
3637 }
3638 return NULL;
3639}
3640
45566e9c
CH
3641int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
3642 int64_t pos, int size)
cf8074b3
KW
3643{
3644 QEMUIOVector qiov;
3645 struct iovec iov = {
3646 .iov_base = (void *) buf,
3647 .iov_len = size,
3648 };
3649
3650 qemu_iovec_init_external(&qiov, &iov, 1);
3651 return bdrv_writev_vmstate(bs, &qiov, pos);
3652}
3653
3654int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
178e08a5
AL
3655{
3656 BlockDriver *drv = bs->drv;
cf8074b3
KW
3657
3658 if (!drv) {
178e08a5 3659 return -ENOMEDIUM;
cf8074b3
KW
3660 } else if (drv->bdrv_save_vmstate) {
3661 return drv->bdrv_save_vmstate(bs, qiov, pos);
3662 } else if (bs->file) {
3663 return bdrv_writev_vmstate(bs->file, qiov, pos);
3664 }
3665
7cdb1f6d 3666 return -ENOTSUP;
178e08a5
AL
3667}
3668
45566e9c
CH
3669int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
3670 int64_t pos, int size)
178e08a5
AL
3671{
3672 BlockDriver *drv = bs->drv;
3673 if (!drv)
3674 return -ENOMEDIUM;
7cdb1f6d
MK
3675 if (drv->bdrv_load_vmstate)
3676 return drv->bdrv_load_vmstate(bs, buf, pos, size);
3677 if (bs->file)
3678 return bdrv_load_vmstate(bs->file, buf, pos, size);
3679 return -ENOTSUP;
178e08a5
AL
3680}
3681
8b9b0cc2
KW
3682void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
3683{
bf736fe3 3684 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
8b9b0cc2
KW
3685 return;
3686 }
3687
bf736fe3 3688 bs->drv->bdrv_debug_event(bs, event);
41c695c7
KW
3689}
3690
3691int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
3692 const char *tag)
3693{
3694 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
3695 bs = bs->file;
3696 }
3697
3698 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
3699 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
3700 }
3701
3702 return -ENOTSUP;
3703}
3704
4cc70e93
FZ
3705int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
3706{
3707 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
3708 bs = bs->file;
3709 }
3710
3711 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
3712 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
3713 }
3714
3715 return -ENOTSUP;
3716}
3717
41c695c7
KW
3718int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
3719{
3720 while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
3721 bs = bs->file;
3722 }
8b9b0cc2 3723
41c695c7
KW
3724 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
3725 return bs->drv->bdrv_debug_resume(bs, tag);
3726 }
3727
3728 return -ENOTSUP;
3729}
3730
3731bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
3732{
3733 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
3734 bs = bs->file;
3735 }
3736
3737 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
3738 return bs->drv->bdrv_debug_is_suspended(bs, tag);
3739 }
3740
3741 return false;
8b9b0cc2
KW
3742}
3743
199630b6
BS
3744int bdrv_is_snapshot(BlockDriverState *bs)
3745{
3746 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
3747}
3748
b1b1d783
JC
3749/* backing_file can either be relative, or absolute, or a protocol. If it is
3750 * relative, it must be relative to the chain. So, passing in bs->filename
3751 * from a BDS as backing_file should not be done, as that may be relative to
3752 * the CWD rather than the chain. */
e8a6bb9c
MT
3753BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
3754 const char *backing_file)
3755{
b1b1d783
JC
3756 char *filename_full = NULL;
3757 char *backing_file_full = NULL;
3758 char *filename_tmp = NULL;
3759 int is_protocol = 0;
3760 BlockDriverState *curr_bs = NULL;
3761 BlockDriverState *retval = NULL;
3762
3763 if (!bs || !bs->drv || !backing_file) {
e8a6bb9c
MT
3764 return NULL;
3765 }
3766
b1b1d783
JC
3767 filename_full = g_malloc(PATH_MAX);
3768 backing_file_full = g_malloc(PATH_MAX);
3769 filename_tmp = g_malloc(PATH_MAX);
3770
3771 is_protocol = path_has_protocol(backing_file);
3772
3773 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
3774
3775 /* If either of the filename paths is actually a protocol, then
3776 * compare unmodified paths; otherwise make paths relative */
3777 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
3778 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
3779 retval = curr_bs->backing_hd;
3780 break;
3781 }
e8a6bb9c 3782 } else {
b1b1d783
JC
3783 /* If not an absolute filename path, make it relative to the current
3784 * image's filename path */
3785 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
3786 backing_file);
3787
3788 /* We are going to compare absolute pathnames */
3789 if (!realpath(filename_tmp, filename_full)) {
3790 continue;
3791 }
3792
3793 /* We need to make sure the backing filename we are comparing against
3794 * is relative to the current image filename (or absolute) */
3795 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
3796 curr_bs->backing_file);
3797
3798 if (!realpath(filename_tmp, backing_file_full)) {
3799 continue;
3800 }
3801
3802 if (strcmp(backing_file_full, filename_full) == 0) {
3803 retval = curr_bs->backing_hd;
3804 break;
3805 }
e8a6bb9c
MT
3806 }
3807 }
3808
b1b1d783
JC
3809 g_free(filename_full);
3810 g_free(backing_file_full);
3811 g_free(filename_tmp);
3812 return retval;
e8a6bb9c
MT
3813}
3814
f198fd1c
BC
3815int bdrv_get_backing_file_depth(BlockDriverState *bs)
3816{
3817 if (!bs->drv) {
3818 return 0;
3819 }
3820
3821 if (!bs->backing_hd) {
3822 return 0;
3823 }
3824
3825 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
3826}
3827
79fac568
JC
3828BlockDriverState *bdrv_find_base(BlockDriverState *bs)
3829{
3830 BlockDriverState *curr_bs = NULL;
3831
3832 if (!bs) {
3833 return NULL;
3834 }
3835
3836 curr_bs = bs;
3837
3838 while (curr_bs->backing_hd) {
3839 curr_bs = curr_bs->backing_hd;
3840 }
3841 return curr_bs;
3842}
3843
ea2384d3 3844/**************************************************************/
83f64091 3845/* async I/Os */
ea2384d3 3846
3b69e4b9 3847BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
f141eafe 3848 QEMUIOVector *qiov, int nb_sectors,
3b69e4b9 3849 BlockDriverCompletionFunc *cb, void *opaque)
83f64091 3850{
bbf0a440
SH
3851 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
3852
d20d9b7c 3853 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
8c5873d6 3854 cb, opaque, false);
ea2384d3
FB
3855}
3856
f141eafe
AL
3857BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
3858 QEMUIOVector *qiov, int nb_sectors,
3859 BlockDriverCompletionFunc *cb, void *opaque)
ea2384d3 3860{
bbf0a440
SH
3861 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
3862
d20d9b7c 3863 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
8c5873d6 3864 cb, opaque, true);
83f64091
FB
3865}
3866
d5ef94d4
PB
3867BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
3868 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
3869 BlockDriverCompletionFunc *cb, void *opaque)
3870{
3871 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
3872
3873 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
3874 BDRV_REQ_ZERO_WRITE | flags,
3875 cb, opaque, true);
3876}
3877
/* Completion bookkeeping shared by the requests of one multiwrite batch. */
typedef struct MultiwriteCB {
    int error;            /* first error seen by any request in the batch */
    int num_requests;     /* submitted requests not yet completed */
    int num_callbacks;    /* number of per-request callback slots below */
    struct {
        BlockDriverCompletionFunc *cb;  /* caller's completion callback */
        void *opaque;                   /* caller's callback argument */
        QEMUIOVector *free_qiov;        /* merged qiov to destroy, or NULL */
    } callbacks[];        /* flexible array, one slot per original request */
} MultiwriteCB;
3889
3890static void multiwrite_user_cb(MultiwriteCB *mcb)
3891{
3892 int i;
3893
3894 for (i = 0; i < mcb->num_callbacks; i++) {
3895 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
1e1ea48d
SH
3896 if (mcb->callbacks[i].free_qiov) {
3897 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
3898 }
7267c094 3899 g_free(mcb->callbacks[i].free_qiov);
40b4f539
KW
3900 }
3901}
3902
3903static void multiwrite_cb(void *opaque, int ret)
3904{
3905 MultiwriteCB *mcb = opaque;
3906
6d519a5f
SH
3907 trace_multiwrite_cb(mcb, ret);
3908
cb6d3ca0 3909 if (ret < 0 && !mcb->error) {
40b4f539 3910 mcb->error = ret;
40b4f539
KW
3911 }
3912
3913 mcb->num_requests--;
3914 if (mcb->num_requests == 0) {
de189a1b 3915 multiwrite_user_cb(mcb);
7267c094 3916 g_free(mcb);
40b4f539
KW
3917 }
3918}
3919
3920static int multiwrite_req_compare(const void *a, const void *b)
3921{
77be4366
CH
3922 const BlockRequest *req1 = a, *req2 = b;
3923
3924 /*
3925 * Note that we can't simply subtract req2->sector from req1->sector
3926 * here as that could overflow the return value.
3927 */
3928 if (req1->sector > req2->sector) {
3929 return 1;
3930 } else if (req1->sector < req2->sector) {
3931 return -1;
3932 } else {
3933 return 0;
3934 }
40b4f539
KW
3935}
3936
3937/*
3938 * Takes a bunch of requests and tries to merge them. Returns the number of
3939 * requests that remain after merging.
3940 */
3941static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
3942 int num_reqs, MultiwriteCB *mcb)
3943{
3944 int i, outidx;
3945
3946 // Sort requests by start sector
3947 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
3948
3949 // Check if adjacent requests touch the same clusters. If so, combine them,
3950 // filling up gaps with zero sectors.
3951 outidx = 0;
3952 for (i = 1; i < num_reqs; i++) {
3953 int merge = 0;
3954 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
3955
b6a127a1 3956 // Handle exactly sequential writes and overlapping writes.
40b4f539
KW
3957 if (reqs[i].sector <= oldreq_last) {
3958 merge = 1;
3959 }
3960
e2a305fb
CH
3961 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
3962 merge = 0;
3963 }
3964
40b4f539
KW
3965 if (merge) {
3966 size_t size;
7267c094 3967 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
40b4f539
KW
3968 qemu_iovec_init(qiov,
3969 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
3970
3971 // Add the first request to the merged one. If the requests are
3972 // overlapping, drop the last sectors of the first request.
3973 size = (reqs[i].sector - reqs[outidx].sector) << 9;
1b093c48 3974 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
40b4f539 3975
b6a127a1
PB
3976 // We should need to add any zeros between the two requests
3977 assert (reqs[i].sector <= oldreq_last);
40b4f539
KW
3978
3979 // Add the second request
1b093c48 3980 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
40b4f539 3981
cbf1dff2 3982 reqs[outidx].nb_sectors = qiov->size >> 9;
40b4f539
KW
3983 reqs[outidx].qiov = qiov;
3984
3985 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
3986 } else {
3987 outidx++;
3988 reqs[outidx].sector = reqs[i].sector;
3989 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
3990 reqs[outidx].qiov = reqs[i].qiov;
3991 }
3992 }
3993
3994 return outidx + 1;
3995}
3996
3997/*
3998 * Submit multiple AIO write requests at once.
3999 *
4000 * On success, the function returns 0 and all requests in the reqs array have
4001 * been submitted. In error case this function returns -1, and any of the
4002 * requests may or may not be submitted yet. In particular, this means that the
4003 * callback will be called for some of the requests, for others it won't. The
4004 * caller must check the error field of the BlockRequest to wait for the right
4005 * callbacks (if error != 0, no callback will be called).
4006 *
4007 * The implementation may modify the contents of the reqs array, e.g. to merge
4008 * requests. However, the fields opaque and error are left unmodified as they
4009 * are used to signal failure for a single request to the caller.
4010 */
4011int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4012{
40b4f539
KW
4013 MultiwriteCB *mcb;
4014 int i;
4015
301db7c2
RH
4016 /* don't submit writes if we don't have a medium */
4017 if (bs->drv == NULL) {
4018 for (i = 0; i < num_reqs; i++) {
4019 reqs[i].error = -ENOMEDIUM;
4020 }
4021 return -1;
4022 }
4023
40b4f539
KW
4024 if (num_reqs == 0) {
4025 return 0;
4026 }
4027
4028 // Create MultiwriteCB structure
7267c094 4029 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
40b4f539
KW
4030 mcb->num_requests = 0;
4031 mcb->num_callbacks = num_reqs;
4032
4033 for (i = 0; i < num_reqs; i++) {
4034 mcb->callbacks[i].cb = reqs[i].cb;
4035 mcb->callbacks[i].opaque = reqs[i].opaque;
4036 }
4037
4038 // Check for mergable requests
4039 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4040
6d519a5f
SH
4041 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4042
df9309fb
PB
4043 /* Run the aio requests. */
4044 mcb->num_requests = num_reqs;
40b4f539 4045 for (i = 0; i < num_reqs; i++) {
d20d9b7c
PB
4046 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4047 reqs[i].nb_sectors, reqs[i].flags,
4048 multiwrite_cb, mcb,
4049 true);
40b4f539
KW
4050 }
4051
4052 return 0;
40b4f539
KW
4053}
4054
83f64091 4055void bdrv_aio_cancel(BlockDriverAIOCB *acb)
83f64091 4056{
d7331bed 4057 acb->aiocb_info->cancel(acb);
83f64091
FB
4058}
4059
/**************************************************************/
/* async block device emulation */

/* AIOCB for emulating async I/O on top of synchronous driver callbacks:
 * the request runs synchronously into a bounce buffer and completion is
 * deferred to a bottom half. */
typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;   /* base AIOCB, must be first */
    QEMUBH *bh;                /* bottom half delivering the completion */
    int ret;                   /* result of the synchronous read/write */
    /* vector translation state */
    QEMUIOVector *qiov;        /* caller's scatter/gather list */
    uint8_t *bounce;           /* contiguous bounce buffer */
    int is_write;              /* direction flag: nonzero for writes */
} BlockDriverAIOCBSync;
4072
4073static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4074{
b666d239
KW
4075 BlockDriverAIOCBSync *acb =
4076 container_of(blockacb, BlockDriverAIOCBSync, common);
6a7ad299 4077 qemu_bh_delete(acb->bh);
36afc451 4078 acb->bh = NULL;
c16b5a2c
CH
4079 qemu_aio_release(acb);
4080}
4081
/* AIOCB vtable for the synchronous-emulation path. */
static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBSync),
    .cancel = bdrv_aio_cancel_em,
};
4086
/* Completion bottom half for the synchronous emulation: copy read data
 * from the bounce buffer back into the caller's iovec, free the bounce
 * buffer, invoke the completion callback and release the AIOCB. */
static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}
beac80cd 4099
/*
 * Emulate AIO on top of a driver's synchronous bdrv_read/bdrv_write:
 * the transfer happens immediately through a bounce buffer, and the
 * caller's callback is deferred to a bottom half so the usual AIO
 * completion ordering is preserved.
 */
static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    /* Linearize the scatter/gather list into one aligned bounce buffer. */
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    /* Deliver completion from a BH, never from within the submit call. */
    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
4128
f141eafe
AL
4129static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4130 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
ce1a14dc 4131 BlockDriverCompletionFunc *cb, void *opaque)
beac80cd 4132{
f141eafe
AL
4133 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4134}
83f64091 4135
f141eafe
AL
4136static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4137 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4138 BlockDriverCompletionFunc *cb, void *opaque)
4139{
4140 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
beac80cd 4141}
beac80cd 4142
/* Per-request state for AIO emulated on top of coroutine I/O:
 * a coroutine runs the request and a BH delivers the completion. */
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;    /* generic AIOCB header; must be first */
    BlockRequest req;           /* sector range, qiov, flags and result */
    bool is_write;              /* selects readv vs writev in bdrv_co_do_rw */
    bool *done;                 /* set by the completion BH; used by cancel
                                 * to busy-wait for the request to finish */
    QEMUBH* bh;                 /* completion bottom half */
} BlockDriverAIOCBCoroutine;
4151
/* Cancel hook for coroutine-based AIO: there is no way to abort the
 * coroutine mid-flight, so point acb->done at a local flag and pump the
 * AIO event loop until the completion BH (bdrv_co_em_bh) sets it. */
static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}
4163
/* AIOCB implementation descriptor for the coroutine emulation path. */
static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
    .cancel = bdrv_aio_co_cancel_em,
};
4168
/* Completion bottom half for coroutine-based AIO: deliver the callback,
 * signal a pending cancel (if any) via *acb->done, and free the AIOCB. */
static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    /* Wake up bdrv_aio_co_cancel_em() if it is waiting on this request. */
    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}
4182
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev and schedule the completion
 * BH; runs as the body of the coroutine created in bdrv_co_aio_rw_vector. */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    /* Completion is always delivered from a BH, not from coroutine context. */
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
4200
/*
 * Submit an AIO read or write by spawning a coroutine that runs
 * bdrv_co_do_rw().  Returns the AIOCB; completion is reported through a
 * bottom half (bdrv_co_em_bh).
 */
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;   /* only set by a later cancel */

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
4226
/* Coroutine body for bdrv_aio_flush(): run the flush and schedule the
 * completion BH. */
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
4236
/* Asynchronous flush: spawn a coroutine running bdrv_co_flush() and
 * report completion through the usual AIOCB callback. */
BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;   /* only set by a later cancel */

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
4253
/* Coroutine body for bdrv_aio_discard(): run the discard and schedule the
 * completion BH. */
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
4263
/* Asynchronous discard: spawn a coroutine running bdrv_co_discard() and
 * report completion through the usual AIOCB callback. */
BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;   /* only set by a later cancel */
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
4282
/* Register all built-in block driver modules. */
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}
ce1a14dc 4287
/* Like bdrv_init(), but restrict format probing/opening to the
 * compile-time driver whitelist. */
void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}
4293
/* Allocate and initialize an AIOCB of the size declared by @aiocb_info;
 * the caller fills in implementation-specific fields afterwards.
 * Released with qemu_aio_release(). */
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}
4306
4307void qemu_aio_release(void *p)
4308{
d37c975f 4309 BlockDriverAIOCB *acb = p;
d7331bed 4310 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
ce1a14dc 4311}
19cb3738 4312
f9f05dc5
KW
4313/**************************************************************/
4314/* Coroutine block device emulation */
4315
/* Bridges a driver's AIO callback back into a yielded coroutine:
 * the callback stores the result and re-enters the coroutine. */
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;   /* coroutine to resume on completion */
    int ret;                /* result passed to the AIO callback */
} CoroutineIOCompletion;
4320
/* AIO completion callback: record the result and resume the coroutine
 * that is waiting in bdrv_co_io_em() (or a flush/discard fallback). */
static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
4328
/* Emulate coroutine I/O on top of a driver's AIO interface: submit the
 * request, yield, and return the result delivered by
 * bdrv_co_io_em_complete().  Returns -EIO if submission itself fails. */
static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    /* Suspend until bdrv_co_io_em_complete() re-enters us with the result. */
    qemu_coroutine_yield();

    return co.ret;
}
4354
4355static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4356 int64_t sector_num, int nb_sectors,
4357 QEMUIOVector *iov)
4358{
4359 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4360}
4361
4362static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4363 int64_t sector_num, int nb_sectors,
4364 QEMUIOVector *iov)
4365{
4366 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4367}
4368
/* Coroutine entry point used by the synchronous bdrv_flush() wrapper. */
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
4375
/*
 * Flush cached data for @bs to stable storage.
 *
 * Ordering: (1) let the format driver write its own cached metadata to
 * the OS (flush_to_os) — this happens even with cache=unsafe; (2) unless
 * cache=unsafe, force the data to the disk via flush_to_disk, the AIO
 * flush hook, or accept a no-op for drivers without any flush; (3) flush
 * the underlying protocol layer (bs->file) recursively.
 *
 * Returns 0 on success (including "nothing to do") or a negative errno.
 */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    /* Nothing to flush for a missing/readonly medium (also terminates the
     * recursion when bs->file is NULL). */
    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        /* Bridge the AIO flush hook into this coroutine. */
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
4438
0f15423c
AL
4439void bdrv_invalidate_cache(BlockDriverState *bs)
4440{
4441 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
4442 bs->drv->bdrv_invalidate_cache(bs);
4443 }
4444}
4445
/* Invalidate cached metadata on every registered block device. */
void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_invalidate_cache(bs);
    }
}
4454
07789269
BC
4455void bdrv_clear_incoming_migration_all(void)
4456{
4457 BlockDriverState *bs;
4458
dc364f4c 4459 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
07789269
BC
4460 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
4461 }
4462}
4463
/* Synchronous flush: run bdrv_co_flush() in a coroutine (or directly if
 * already in one) and wait for completion. */
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        /* Pump the event loop until the coroutine stores its result. */
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
4485
/* Coroutine entry point used by the synchronous bdrv_discard() wrapper. */
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
4492
6f14da52
PL
4493/* if no limit is specified in the BlockLimits use a default
4494 * of 32768 512-byte sectors (16 MiB) per request.
4495 */
4496#define MAX_DISCARD_DEFAULT 32768
4497
4265d620
PB
4498int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
4499 int nb_sectors)
4500{
d51e9fe5
PB
4501 int max_discard;
4502
4265d620
PB
4503 if (!bs->drv) {
4504 return -ENOMEDIUM;
4505 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
4506 return -EIO;
4507 } else if (bs->read_only) {
4508 return -EROFS;
df702c9b
PB
4509 }
4510
e4654d2d 4511 bdrv_reset_dirty(bs, sector_num, nb_sectors);
df702c9b 4512
9e8f1835
PB
4513 /* Do nothing if disabled. */
4514 if (!(bs->open_flags & BDRV_O_UNMAP)) {
4515 return 0;
4516 }
4517
d51e9fe5
PB
4518 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
4519 return 0;
4520 }
6f14da52 4521
d51e9fe5
PB
4522 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
4523 while (nb_sectors > 0) {
4524 int ret;
4525 int num = nb_sectors;
6f14da52 4526
d51e9fe5
PB
4527 /* align request */
4528 if (bs->bl.discard_alignment &&
4529 num >= bs->bl.discard_alignment &&
4530 sector_num % bs->bl.discard_alignment) {
4531 if (num > bs->bl.discard_alignment) {
4532 num = bs->bl.discard_alignment;
6f14da52 4533 }
d51e9fe5
PB
4534 num -= sector_num % bs->bl.discard_alignment;
4535 }
6f14da52 4536
d51e9fe5
PB
4537 /* limit request size */
4538 if (num > max_discard) {
4539 num = max_discard;
4540 }
6f14da52 4541
d51e9fe5 4542 if (bs->drv->bdrv_co_discard) {
6f14da52 4543 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
d51e9fe5
PB
4544 } else {
4545 BlockDriverAIOCB *acb;
4546 CoroutineIOCompletion co = {
4547 .coroutine = qemu_coroutine_self(),
4548 };
4549
4550 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
4551 bdrv_co_io_em_complete, &co);
4552 if (acb == NULL) {
4553 return -EIO;
4554 } else {
4555 qemu_coroutine_yield();
4556 ret = co.ret;
6f14da52 4557 }
6f14da52 4558 }
7ce21016 4559 if (ret && ret != -ENOTSUP) {
d51e9fe5 4560 return ret;
4265d620 4561 }
d51e9fe5
PB
4562
4563 sector_num += num;
4564 nb_sectors -= num;
4265d620 4565 }
d51e9fe5 4566 return 0;
4265d620
PB
4567}
4568
/* Synchronous discard: run bdrv_co_discard() in a coroutine (or directly
 * if already in one) and wait for completion. */
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        /* Pump the event loop until the coroutine stores its result. */
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
4592
19cb3738
FB
4593/**************************************************************/
4594/* removable device support */
4595
4596/**
4597 * Return TRUE if the media is present
4598 */
4599int bdrv_is_inserted(BlockDriverState *bs)
4600{
4601 BlockDriver *drv = bs->drv;
a1aff5bf 4602
19cb3738
FB
4603 if (!drv)
4604 return 0;
4605 if (!drv->bdrv_is_inserted)
a1aff5bf
MA
4606 return 1;
4607 return drv->bdrv_is_inserted(bs);
19cb3738
FB
4608}
4609
4610/**
8e49ca46
MA
4611 * Return whether the media changed since the last call to this
4612 * function, or -ENOTSUP if we don't know. Most drivers don't know.
19cb3738
FB
4613 */
4614int bdrv_media_changed(BlockDriverState *bs)
4615{
4616 BlockDriver *drv = bs->drv;
19cb3738 4617
8e49ca46
MA
4618 if (drv && drv->bdrv_media_changed) {
4619 return drv->bdrv_media_changed(bs);
4620 }
4621 return -ENOTSUP;
19cb3738
FB
4622}
4623
4624/**
4625 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
4626 */
f36f3949 4627void bdrv_eject(BlockDriverState *bs, bool eject_flag)
19cb3738
FB
4628{
4629 BlockDriver *drv = bs->drv;
19cb3738 4630
822e1cd1
MA
4631 if (drv && drv->bdrv_eject) {
4632 drv->bdrv_eject(bs, eject_flag);
19cb3738 4633 }
6f382ed2
LC
4634
4635 if (bs->device_name[0] != '\0') {
4636 bdrv_emit_qmp_eject_event(bs, eject_flag);
4637 }
19cb3738
FB
4638}
4639
19cb3738
FB
4640/**
4641 * Lock or unlock the media (if it is locked, the user won't be able
4642 * to eject it manually).
4643 */
025e849a 4644void bdrv_lock_medium(BlockDriverState *bs, bool locked)
19cb3738
FB
4645{
4646 BlockDriver *drv = bs->drv;
4647
025e849a 4648 trace_bdrv_lock_medium(bs, locked);
b8c6d095 4649
025e849a
MA
4650 if (drv && drv->bdrv_lock_medium) {
4651 drv->bdrv_lock_medium(bs, locked);
19cb3738
FB
4652 }
4653}
985a03b0
TS
4654
4655/* needed for generic scsi interface */
4656
4657int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
4658{
4659 BlockDriver *drv = bs->drv;
4660
4661 if (drv && drv->bdrv_ioctl)
4662 return drv->bdrv_ioctl(bs, req, buf);
4663 return -ENOTSUP;
4664}
7d780669 4665
221f715d
AL
4666BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
4667 unsigned long int req, void *buf,
4668 BlockDriverCompletionFunc *cb, void *opaque)
7d780669 4669{
221f715d 4670 BlockDriver *drv = bs->drv;
7d780669 4671
221f715d
AL
4672 if (drv && drv->bdrv_aio_ioctl)
4673 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
4674 return NULL;
7d780669 4675}
e268ca52 4676
/* Record the memory alignment required for this device's I/O buffers
 * (consumed by qemu_blockalign() and bdrv_qiov_is_aligned()). */
void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}
7cd1e32a 4681
e268ca52
AL
4682void *qemu_blockalign(BlockDriverState *bs, size_t size)
4683{
4684 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
4685}
7cd1e32a 4686
c53b1c51
SH
4687/*
4688 * Check if all memory in this vector is sector aligned.
4689 */
4690bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
4691{
4692 int i;
4693
4694 for (i = 0; i < qiov->niov; i++) {
4695 if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
4696 return false;
4697 }
4698 }
4699
4700 return true;
4701}
4702
/* Allocate a dirty bitmap tracking writes to @bs at @granularity bytes
 * per bit (must be a power of two) and link it into the device's bitmap
 * list.  Freed with bdrv_release_dirty_bitmap(). */
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    /* Convert byte granularity to sectors; ffs() below turns it into the
     * hbitmap level shift. */
    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
4718
/* Unlink @bitmap from @bs and free it; silently does nothing if the
 * bitmap is not in the device's list. */
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
4731
/* Build a QAPI BlockDirtyInfoList describing every dirty bitmap attached
 * to @bs (dirty sector count and byte granularity); caller owns the list. */
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    /* Tail pointer so entries are appended in list order. */
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}
4751
e4654d2d 4752int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
7cd1e32a 4753{
e4654d2d
FZ
4754 if (bitmap) {
4755 return hbitmap_get(bitmap->bitmap, sector);
7cd1e32a
LS
4756 } else {
4757 return 0;
4758 }
4759}
4760
/* Initialize @hbi to iterate over the dirty bits of @bitmap from the
 * start of the device. */
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
4766
/* Mark [cur_sector, cur_sector + nr_sectors) dirty in every bitmap
 * attached to @bs. */
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
4775
/* Clear [cur_sector, cur_sector + nr_sectors) in every bitmap attached
 * to @bs (e.g. after the range was discarded or copied out). */
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
aaa0eb75 4783
/* Return the number of dirty sectors recorded in @bitmap. */
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
f88e1a42 4788
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}
4794
4795/* Release a previously grabbed reference to bs.
4796 * If after releasing, reference count is zero, the BlockDriverState is
4797 * deleted. */
4798void bdrv_unref(BlockDriverState *bs)
4799{
4800 assert(bs->refcnt > 0);
4801 if (--bs->refcnt == 0) {
4802 bdrv_delete(bs);
4803 }
4804}
4805
/* Mark @bs as (un)claimed by a block job or device; the assert catches
 * double-claim and double-release. */
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}
4811
/* Return non-zero if @bs is currently claimed via bdrv_set_in_use(). */
int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
4816
/* Enable I/O status tracking for @bs and reset it to OK. */
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
4822
4823/* The I/O status is only enabled if the drive explicitly
4824 * enables it _and_ the VM is configured to stop on errors */
4825bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
4826{
d6bf279e 4827 return (bs->iostatus_enabled &&
92aa5c6d
PB
4828 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
4829 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
4830 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
28a7282a
LC
4831}
4832
/* Disable I/O status tracking for @bs. */
void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
4837
/* Reset the I/O status of @bs (and of any attached block job) back to
 * OK; no-op when I/O status tracking is not enabled. */
void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
4847
/* Latch an error into the I/O status: ENOSPC maps to NOSPACE, anything
 * else to FAILED.  Only the first error is recorded (sticky until reset). */
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
4856
a597e79c
CH
4857void
4858bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
4859 enum BlockAcctType type)
4860{
4861 assert(type < BDRV_MAX_IOTYPE);
4862
4863 cookie->bytes = bytes;
c488c7f6 4864 cookie->start_time_ns = get_clock();
a597e79c
CH
4865 cookie->type = type;
4866}
4867
4868void
4869bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
4870{
4871 assert(cookie->type < BDRV_MAX_IOTYPE);
4872
4873 bs->nr_bytes[cookie->type] += cookie->bytes;
4874 bs->nr_ops[cookie->type]++;
c488c7f6 4875 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
a597e79c
CH
4876}
4877
/*
 * Create a disk image.
 *
 * @filename/@fmt name the new image; @base_filename/@base_fmt optionally
 * name a backing file; @options is a "-o"-style option string; @img_size
 * may be overridden by an explicit size option or derived from the
 * backing file.  Errors are reported through @errp; nothing is printed
 * when @quiet is true.
 */
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    /* Merge format-driver and protocol-driver creation options. */
    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;  /* NOTE: deliberately shadows the outer 'size' */
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                bdrv_unref(bs);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;    /* geometry is in 512-byte sectors */

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}
/* Register a notifier that is invoked before each write request on @bs;
 * a notifier returning an error aborts the write. */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
6f176b48
HR
5038
5039int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
5040{
5041 if (bs->drv->bdrv_amend_options == NULL) {
5042 return -ENOTSUP;
5043 }
5044 return bs->drv->bdrv_amend_options(bs, options);
5045}
/* Ask whether an external snapshot may be taken of @bs: first the format
 * driver, then the protocol layer below it; allowed by default.
 * NOTE(review): the protocol-layer hook is invoked with @bs, not
 * @bs->file — confirm this is intentional. */
ExtSnapshotPerm bdrv_check_ext_snapshot(BlockDriverState *bs)
{
    if (bs->drv->bdrv_check_ext_snapshot) {
        return bs->drv->bdrv_check_ext_snapshot(bs);
    }

    if (bs->file && bs->file->drv && bs->file->drv->bdrv_check_ext_snapshot) {
        return bs->file->drv->bdrv_check_ext_snapshot(bs);
    }

    /* external snapshots are allowed by default */
    return EXT_SNAPSHOT_ALLOWED;
}
5060
/* Drop-in bdrv_check_ext_snapshot hook for drivers that never allow
 * external snapshots. */
ExtSnapshotPerm bdrv_check_ext_snapshot_forbidden(BlockDriverState *bs)
{
    return EXT_SNAPSHOT_FORBIDDEN;
}