/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

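/*
 * Worked example (editorial sketch, not part of the original file): how
 * limits combine. MIN_NON_ZERO() treats 0 as "no limit set", so a child
 * that reports no cap cannot loosen a parent's cap. The numbers are
 * invented for illustration.
 */
static void example_merge_limits(void)
{
    BlockLimits parent = { .max_transfer = 0, .max_iov = 128 };
    BlockLimits child = { .max_transfer = 65536, .max_iov = 0 };

    bdrv_merge_limits(&parent, &child);
    /* parent.max_transfer is now 65536: the child's cap replaces "no limit".
     * parent.max_iov stays 128: the child reported no cap of its own. */
}
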
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

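/*
 * Illustrative sketch (editorial, not part of the original file): because
 * the flag is a reference count, two independent users can overlap without
 * clobbering each other.
 */
static void example_copy_on_read_users(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);  /* user A: count 0 -> 1, feature on */
    bdrv_enable_copy_on_read(bs);  /* user B: count 1 -> 2, still on */
    bdrv_disable_copy_on_read(bs); /* user A done: count 2 -> 1, still on */
    bdrv_disable_copy_on_read(bs); /* user B done: count 1 -> 0, now off */
}
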
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;
    bool waited;

    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        waited |= bdrv_drain_recurse(child->bs);
    }

    return waited;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    bdrv_drained_begin(bs);
    data->done = true;
    qemu_coroutine_enter(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
        return;
    }

    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

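/*
 * Illustrative sketch (editorial, not part of the original file): the
 * begin/end pair nests via quiesce_counter, so a caller can quiesce a node
 * around a critical section without caring whether an outer section is
 * already active.
 */
static void example_drained_section(BlockDriverState *bs)
{
    bdrv_drained_begin(bs); /* new requests held off, in-flight ones settle */
    /* ... safely reconfigure the node, e.g. for a snapshot or reopen ... */
    bdrv_drained_end(bs);   /* normal request flow resumes */
}
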
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the
 * BlockDriverState's AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts. In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion. Therefore we must keep looping until there is no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                                 - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

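/*
 * Worked example (editorial): with req->offset == 4608, req->bytes == 512
 * and align == 4096, the serialised window grows to the enclosing aligned
 * region: overlap_offset = 4608 & ~4095 = 4096 and
 * overlap_bytes = ROUND_UP(4608 + 512, 4096) - 4096 = 4096, so any request
 * touching bytes [4096, 8192) now conflicts with this one.
 */
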
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

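/*
 * Worked example (editorial): with a 64 KiB cluster size, a request for
 * bytes [70000, 71000) is widened to the whole containing cluster:
 * cluster_offset = QEMU_ALIGN_DOWN(70000, 65536) = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536,
 * i.e. the region [65536, 131072).
 */
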
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

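/*
 * Worked example (editorial): a tracked request whose window is
 * [4096, 8192) overlaps a probe with offset = 8000, bytes = 400 (they
 * share bytes [8000, 8192)), but not one with offset = 8192, bytes = 512:
 * the ranges are half-open, so merely touching is not an overlap.
 */
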
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    if (bs->wakeup) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

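/*
 * Editorial note: this is the file's standard pattern for running a
 * coroutine_fn synchronously. rwco.ret starts at the NOT_DONE sentinel;
 * if the coroutine yields to wait for I/O, BDRV_POLL_WHILE() keeps
 * dispatching AioContext events until the entry function has stored a
 * real return value.
 */
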
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
   -EINVAL      Invalid sector number or nb_sectors
   -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to ranges that do not already read as zeroes. Optional flags are
 * passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

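/*
 * Illustrative sketch (editorial, not part of the original file): a typical
 * caller pattern for metadata updates. The header layout and helper name
 * are hypothetical; only bdrv_pwrite_sync() is real.
 */
static int example_update_header(BdrvChild *child, const void *header,
                                 int header_size)
{
    /* The flush inside bdrv_pwrite_sync() guarantees that no later write
     * reaches the disk before this header update does. */
    return bdrv_pwrite_sync(child, 0, header, header_size);
}
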
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

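/*
 * Editorial note: the ladder above adapts one driver interface to another.
 * It prefers the byte-based bdrv_co_preadv(), falls back to the
 * sector-based bdrv_co_readv(), and finally bridges the callback-style
 * bdrv_aio_readv() into coroutine context via CoroutineIOCompletion.
 */
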
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

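/*
 * Editorial note: at the "emulate_flags" label, "flags" holds only the
 * bits the driver could not handle natively; in practice that is
 * BDRV_REQ_FUA, which is emulated with a full flush after a successful
 * write.
 */
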
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests. If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

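/*
 * Worked example (editorial): with max_transfer == 64 KiB and a 1 MiB
 * image, a 192 KiB read at offset 960 KiB is fragmented into 64 KiB
 * pieces. max_bytes caps real I/O at the 64 KiB that lie inside the
 * image; the remaining 128 KiB past EOF are satisfied by zeroing the
 * qiov with qemu_iovec_memset().
 */
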
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before a write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

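/*
 * Worked example (editorial): with request_alignment == 512, a 4-byte
 * read at offset 510 becomes a 1024-byte aligned read at offset 0. The
 * local qiov is [head_buf (510 bytes), caller's buffers (4 bytes),
 * tail_buf (510 bytes)], so only the middle 4 bytes land in the caller's
 * memory.
 */
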
static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + count) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

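/*
 * Worked example (editorial): with alignment == 4096, zeroing
 * offset = 4000, count = 10000 takes three loop iterations:
 *   1. [4000, 4096)   small head up to the first aligned boundary (96 bytes)
 *   2. [4096, 12288)  the aligned bulk (8192 bytes)
 *   3. [12288, 14000) the unaligned tail (1712 bytes)
 */
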
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    ++bs->write_gen;
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base = buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base = head_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base = tail_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is
 * set, 'file' points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    bdrv_inc_in_flight(bs);
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                    *pnum, pnum, file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors. This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    return ret;
}

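/*
 * Editorial note: the returned int64_t packs status flags
 * (BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_ALLOCATED, ...) together
 * with, when BDRV_BLOCK_OFFSET_VALID is set, the sector-aligned host
 * offset in the high bits; that is why callers pass "ret >>
 * BDRV_SECTOR_BITS" as a sector number when chasing the mapping.
 */
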
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors]. */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

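/*
 * Editor's example (not part of the original file): walking an image with
 * bdrv_is_allocated() to enumerate allocated/unallocated extents, in the
 * style of "qemu-img map".  example_print_extents() is an illustrative name,
 * not QEMU API.
 */
static int example_print_extents(BlockDriverState *bs)
{
    int64_t sector_num = 0;
    int64_t total = bdrv_nb_sectors(bs);

    while (sector_num < total) {
        int pnum;
        int ret = bdrv_is_allocated(bs, sector_num,
                                    MIN(total - sector_num,
                                        BDRV_REQUEST_MAX_SECTORS), &pnum);
        if (ret < 0) {
            return ret;
        }
        fprintf(stderr, "[%" PRId64 ", +%d): %s\n", sector_num, pnum,
                ret ? "allocated" : "unallocated");
        sector_num += pnum;
    }
    return 0;
}
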
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if the given sector is allocated in any image between
 * BASE and TOP (inclusive), 0 otherwise (or a negative errno on failure).
 * BASE can be NULL to check if the given sector is allocated in any
 * image of the chain.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, sector_num + nb_sectors) is unallocated on top but
         * intermediate might still have
         *
         * [sector_num + x, sector_num + nb_sectors) allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

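/*
 * Editor's example (not part of the original file): the typical block-job
 * pattern around bdrv_is_allocated_above() - find the chunks that live in
 * the chain above 'base' and would therefore need copying.  The function
 * name and the empty copy step are illustrative assumptions.
 */
static int example_scan_above(BlockDriverState *top, BlockDriverState *base,
                              int64_t sector_num, int64_t end)
{
    while (sector_num < end) {
        int pnum;
        int ret = bdrv_is_allocated_above(top, base, sector_num,
                                          MIN(end - sector_num,
                                              BDRV_REQUEST_MAX_SECTORS),
                                          &pnum);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            /* [sector_num, sector_num + pnum) is allocated above 'base';
             * a real job would copy or process it here. */
        }
        sector_num += pnum;
    }
    return 0;
}
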
typedef struct BdrvVmstateCo {
    BlockDriverState *bs;
    QEMUIOVector *qiov;
    int64_t pos;
    bool is_read;
    int ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs = bs,
            .qiov = qiov,
            .pos = pos,
            .is_read = is_read,
            .ret = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}

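/*
 * Editor's example (not part of the original file): a round trip through the
 * vmstate helpers above.  Offset 0 and the 512-byte buffer are arbitrary
 * illustration values; on success both helpers return 'size'.
 */
static int example_vmstate_roundtrip(BlockDriverState *bs)
{
    uint8_t buf[512] = { 0x42 };
    int ret;

    ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
    if (ret < 0) {
        return ret;
    }
    memset(buf, 0, sizeof(buf));
    ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
    return ret < 0 ? ret : 0;
}
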
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}

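/*
 * Editor's example (not part of the original file): submitting an
 * asynchronous read.  The completion callback runs in the BDS's AioContext
 * once bdrv_co_do_rw() finishes.  example_read_done()/example_submit_read()
 * are illustrative names, not QEMU API.
 */
static void example_read_done(void *opaque, int ret)
{
    /* ret is 0 on success or a negative errno */
    *(int *)opaque = ret;
}

static BlockAIOCB *example_submit_read(BdrvChild *child, QEMUIOVector *qiov,
                                       int64_t sector_num, int *status)
{
    /* qiov->size must equal nb_sectors * BDRV_SECTOR_SIZE (asserted above) */
    return bdrv_aio_readv(child, sector_num, qiov,
                          qiov->size >> BDRV_SECTOR_BITS,
                          example_read_done, status);
}
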
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally. In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        bdrv_dec_in_flight(acb->common.bs);
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    }
}

/* Invoke bdrv_co_preadv/bdrv_co_pwritev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
    bdrv_inc_in_flight(child->bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
    bdrv_inc_in_flight(bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

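/*
 * Editor's example (not part of the original file): how a driver builds its
 * own AIOCB type on top of qemu_aio_get()/qemu_aio_unref().  ExampleAIOCB
 * and the helper names are illustrative assumptions.
 */
typedef struct ExampleAIOCB {
    BlockAIOCB common;  /* must come first so the pointer can be cast */
    int ret;
} ExampleAIOCB;

static const AIOCBInfo example_aiocb_info = {
    .aiocb_size = sizeof(ExampleAIOCB),
};

static BlockAIOCB *example_aiocb_alloc(BlockDriverState *bs,
                                       BlockCompletionFunc *cb, void *opaque)
{
    ExampleAIOCB *acb = qemu_aio_get(&example_aiocb_info, bs, cb, opaque);

    acb->ret = -EINPROGRESS;
    return &acb->common;
}

static void example_aiocb_complete(ExampleAIOCB *acb)
{
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);  /* drops the reference taken by qemu_aio_get() */
}
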
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;


static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    bdrv_inc_in_flight(bs);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->active_flush_req = true;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    bs->flushed_gen = current_gen;
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);

    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}

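/*
 * Editor's example (not part of the original file): pairing a metadata write
 * with bdrv_flush() to make it durable.  It assumes the BdrvChild-based
 * bdrv_pwrite() of this tree; example_update_header() is an illustrative
 * name.
 */
static int example_update_header(BlockDriverState *bs, BdrvChild *file,
                                 const void *header, size_t size)
{
    int ret = bdrv_pwrite(file, 0, header, size);
    if (ret < 0) {
        return ret;
    }
    /* A no-op if nothing was written since the last flush (write_gen
     * check); the disk-cache flush is skipped under cache=unsafe
     * (BDRV_O_NO_FLUSH). */
    return bdrv_flush(bs);
}
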
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}

int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, so ignore any unaligned head or tail */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    if (head) {
        head = MIN(count, align - head);
        count -= head;
        offset += head;
    }
    count = QEMU_ALIGN_DOWN(count, align);
    if (!count) {
        return 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard);

    while (count > 0) {
        int ret;
        int num = MIN(count, max_pdiscard);

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    ++bs->write_gen;
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

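/*
 * Editor's note (not part of the original file): the head/tail clamping
 * above, worked through for a hypothetical device with a 64 KiB discard
 * granularity and a request for 200 KiB starting at offset 100 KiB:
 *
 *   align  = 64 KiB
 *   head   = 100 KiB % 64 KiB = 36 KiB, so skip MIN(200 KiB, 28 KiB) = 28 KiB
 *   offset becomes 128 KiB, count becomes 172 KiB
 *   count  = QEMU_ALIGN_DOWN(172 KiB, 64 KiB) = 128 KiB
 *
 * Only [128 KiB, 256 KiB) is discarded; the unaligned remainders are
 * silently dropped, which is acceptable because discard is advisory.
 */
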
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

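/*
 * Editor's example (not part of the original file): building an I/O vector
 * that passes bdrv_qiov_is_aligned(), allowing requests to avoid the bounce
 * buffer slow path.  'size' is assumed to be a multiple of
 * bdrv_min_mem_align() so the iov_len check holds; example_make_qiov() is an
 * illustrative name.
 */
static int example_make_qiov(BlockDriverState *bs, QEMUIOVector *qiov,
                             size_t size)
{
    void *buf = qemu_try_blockalign(bs, size);  /* aligned; NULL on ENOMEM */

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(qiov, 1);
    qemu_iovec_add(qiov, buf, size);
    assert(bdrv_qiov_is_aligned(bs, qiov));
    return 0;
}
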
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

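/*
 * Editor's example (not part of the original file): bracketing a burst of
 * submissions with plug/unplug so a driver such as linux-aio can issue them
 * in one batch.  example_submit_batch() is an illustrative name.
 */
static void example_submit_batch(BlockDriverState *bs)
{
    bdrv_io_plug(bs);    /* recursive: plugs every child BDS too */
    /* ... queue any number of aio requests here; a plugged driver may
     * defer submission instead of issuing them one at a time ... */
    bdrv_io_unplug(bs);  /* the matching unplug pushes the batch out */
}
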
void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}