/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

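/*
 * Illustrative example (not from the original source): the merge treats 0 as
 * "no limit", hence MIN_NON_ZERO for hard caps.  If dst->max_transfer == 0
 * (unlimited) and src->max_transfer == 65536, the merged cap is 65536, while
 * opt_transfer takes the larger hint, e.g. MAX(4096, 65536) == 65536.
 */
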
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

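/*
 * Illustrative usage sketch (not part of the original file): because the flag
 * is a counter, nested users compose safely:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... reads issued here may populate the top layer via COR ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Copy-on-read only takes effect while the counter is non-zero.
 */
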
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

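/*
 * Illustrative usage sketch (not part of the original file): a drained
 * section brackets state changes that must not race with in-flight I/O:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs safely; no new external requests can arrive ...
 *     bdrv_drained_end(bs);
 *
 * As with copy-on-read above, quiesce_counter makes nesting safe.
 */
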
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

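/*
 * Worked example (illustrative, not from the original source): with
 * align = 4096, a request at offset 5000 covering 100 bytes rounds out to
 * overlap_offset = 5000 & ~4095 = 4096 and
 * overlap_bytes = ROUND_UP(5100, 4096) - 4096 = 8192 - 4096 = 4096,
 * i.e. the serialising window covers the whole block [4096, 8192).
 */
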
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

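/*
 * Worked example (illustrative, not from the original source): with a
 * 64 KiB cluster size, offset = 70000 and bytes = 1000 yield
 * cluster_offset = QEMU_ALIGN_DOWN(70000, 65536) = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536,
 * i.e. the whole containing cluster [65536, 131072).
 */
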
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(child->bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes.  Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success.  For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

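/*
 * Illustrative usage sketch (not part of the original file): the barrier
 * semantics of bdrv_pwrite_sync() suit updates whose ordering matters, e.g.
 * a hypothetical format driver writing a header that must reach the disk
 * before later metadata writes:
 *
 *     ret = bdrv_pwrite_sync(bs->file, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ... subsequent writes cannot be reordered before the header ...
 */
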
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver.  This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(is_power_of_2(alignment));
    head = offset & (alignment - 1);
    tail = (offset + count) & (alignment - 1);
    max_write_zeroes &= ~(alignment - 1);

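    /*
     * Worked example (illustrative, not from the original source): with
     * alignment = 4096, offset = 5000 and count = 10000, head = 904 and
     * tail = (15000 & 4095) = 2712.  The loop below then issues three
     * chunks: 3192 bytes up to the aligned offset 8192, one aligned
     * 4096-byte chunk, and a final unaligned 2712-byte tail.
     */
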
    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    ++bs->write_gen;
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base = buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base = head_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base = tail_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}

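/*
 * Illustrative example (not from the original source) of decoding the
 * returned value: an allocated data range on a protocol driver might come
 * back as BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID
 * with the host offset encoded in the high bits, e.g.:
 *
 *     if (ret & BDRV_BLOCK_OFFSET_VALID) {
 *         int64_t host_offset = ret & BDRV_BLOCK_OFFSET_MASK;
 *     }
 */
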
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

1758/* Coroutine wrapper for bdrv_get_block_status_above() */
1759static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
61007b31
SH
1760{
1761 BdrvCoGetBlockStatusData *data = opaque;
61007b31 1762
ba3f0e25
FZ
1763 data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1764 data->sector_num,
1765 data->nb_sectors,
67a0fd2a
FZ
1766 data->pnum,
1767 data->file);
61007b31
SH
1768 data->done = true;
1769}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}
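
/*
 * Usage sketch (illustrative, not a caller in this file): a client that
 * wants to know whether a range reads as zeroes might do
 *
 *     BlockDriverState *file;
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                         &pnum, &file);
 *     if (ret >= 0 && (ret & BDRV_BLOCK_ZERO)) {
 *         // the first pnum sectors from sector_num read as zeroes
 *     }
 *
 * On success the low bits of ret carry the status flags and, when
 * BDRV_BLOCK_OFFSET_VALID is set, the high bits carry the host offset.
 */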

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top, but the
         * intermediate image might still have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
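
/*
 * Example (illustrative): for a chain base <- mid <- top, streaming code
 * can ask whether a range is backed by anything above 'base':
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors,
 *                                       &pnum);
 *     // ret == 1 if 'top' or 'mid' allocates sector_num; pnum then covers
 *     // the run of sectors known to share that state
 */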

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

typedef struct BdrvVmstateCo {
    BlockDriverState *bs;
    QEMUIOVector *qiov;
    int64_t pos;
    bool is_read;
    int ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs = bs,
            .qiov = qiov,
            .pos = pos,
            .is_read = is_read,
            .ret = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
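
/*
 * Round-trip sketch (illustrative): migration code stores the VM state
 * blob through these wrappers; both directions return the byte count on
 * success and a negative errno on failure:
 *
 *     uint8_t buf[512];
 *     if (bdrv_save_vmstate(bs, buf, 0, sizeof(buf)) == sizeof(buf)) {
 *         ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 *     }
 */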

/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}
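
/*
 * Both wrappers only translate the sector-based interface into the
 * byte-based one: e.g. nb_sectors = 2 at sector_num = 8 becomes a request
 * of qiov->size = 2 << BDRV_SECTOR_BITS = 1024 bytes at byte offset
 * 8 << BDRV_SECTOR_BITS = 4096, which is why qiov->size must match
 * nb_sectors << BDRV_SECTOR_BITS exactly (see the asserts above).
 */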

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally.  In either case the completion callback must be
 * called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}
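
/*
 * Completion ordering sketch (illustrative): callers of the AIO wrappers
 * expect the completion callback to run only after they have received the
 * ACB.  req.error is preset to -EINPROGRESS before the coroutine is
 * entered; if the coroutine has already finished by the time
 * bdrv_co_maybe_schedule_bh() runs, the callback is deferred to a bottom
 * half instead of being invoked directly:
 *
 *     acb->req.error = -EINPROGRESS;   // not yet complete
 *     qemu_coroutine_enter(co);        // may complete synchronously
 *     bdrv_co_maybe_schedule_bh(acb);  // defer the callback if it did
 */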

/* Invoke bdrv_co_preadv/bdrv_co_pwritev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_pdiscard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_pdiscard(bs, acb->req.offset, acb->req.bytes);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_pdiscard(bs, offset, count, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.bytes = count;
    co = qemu_coroutine_create(bdrv_aio_pdiscard_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
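
/*
 * Refcount sketch (illustrative): bdrv_aio_cancel() above relies on this
 * counting to keep the ACB alive while it polls:
 *
 *     qemu_aio_ref(acb);      // refcnt 1 -> 2
 *     bdrv_aio_cancel_async(acb);
 *     // ... aio_poll() until completion drops its reference ...
 *     qemu_aio_unref(acb);    // refcnt 1 -> 0, g_free()
 */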

/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;


static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->flush_started_gen != bs->flushed_gen) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->flush_started_gen = current_gen;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    bs->flushed_gen = current_gen;
    qemu_co_queue_restart_all(&bs->flush_queue);

    tracked_request_end(&req);
    return ret;
}
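
/*
 * Generation tracking (illustrative numbers): write_gen is bumped at the
 * end of every write, and flushed_gen records the last generation known
 * to be on disk.  If a flush completes at generation 5 and no write
 * happens before the next bdrv_co_flush() call, flushed_gen ==
 * current_gen (5) and the expensive flush-to-disk step is skipped via
 * the flush_parent path.
 */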

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        while (flush_co.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}

int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    /* Discard is advisory, so ignore any unaligned head or tail */
    align = MAX(BDRV_SECTOR_SIZE,
                MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment));
    assert(is_power_of_2(align));
    head = MIN(count, -offset & (align - 1));
    if (head) {
        count -= head;
        offset += head;
    }
    count = QEMU_ALIGN_DOWN(count, align);
    if (!count) {
        return 0;
    }

    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);

    while (count > 0) {
        int num = MIN(count, max_pdiscard);

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, offset >> BDRV_SECTOR_BITS,
                                           num >> BDRV_SECTOR_BITS);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, offset >> BDRV_SECTOR_BITS,
                                            num >> BDRV_SECTOR_BITS,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    ++bs->write_gen;
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    return ret;
}
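
/*
 * Alignment example (illustrative numbers): with align = 4096, a request
 * of offset = 5120, count = 10240 drops an unaligned head of
 * -5120 & 4095 = 3072 bytes, leaving offset = 8192, count = 7168, and
 * QEMU_ALIGN_DOWN(7168, 4096) then trims the tail so that a single
 * 4096-byte discard is issued at offset 8192.
 */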

int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
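
/*
 * Usage sketch (illustrative): discarding the first MiB of an image from
 * non-coroutine context goes through the synchronous wrapper:
 *
 *     ret = bdrv_pdiscard(bs, 0, 1024 * 1024);
 *
 * Note that 0 is also returned when the driver has no discard support at
 * all; discard is advisory and may be silently ignored.
 */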

static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        co.ret = -ENOTSUP;
        goto out;
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                           unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
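
/*
 * Example (illustrative): with a 512-byte minimum alignment, an entry of
 * iov_base = 0x1000 and iov_len = 4096 passes both checks, while
 * iov_base = 0x1001 fails the address test and forces the caller to
 * bounce the request through an aligned buffer.
 */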

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
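
/*
 * Usage sketch (illustrative): a device model that submits several
 * requests in one burst can bracket them so drivers may batch submission:
 *
 *     bdrv_io_plug(bs);
 *     // ... queue a number of bdrv_aio_readv()/bdrv_aio_writev() calls ...
 *     bdrv_io_unplug(bs);  // drivers submit their queued requests here
 *
 * bdrv_io_unplugged_begin()/end() temporarily defeat plugging across a
 * section that must not leave requests sitting in driver queues.
 */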