2 * Block layer I/O functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "block/coroutines.h"
33 #include "block/dirty-bitmap.h"
34 #include "block/write-threshold.h"
35 #include "qemu/cutils.h"
36 #include "qemu/memalign.h"
37 #include "qapi/error.h"
38 #include "qemu/error-report.h"
39 #include "qemu/main-loop.h"
40 #include "sysemu/replay.h"
42 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
43 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
45 static void bdrv_parent_cb_resize(BlockDriverState
*bs
);
46 static int coroutine_fn
bdrv_co_do_pwrite_zeroes(BlockDriverState
*bs
,
47 int64_t offset
, int64_t bytes
, BdrvRequestFlags flags
);
49 static void bdrv_parent_drained_begin(BlockDriverState
*bs
, BdrvChild
*ignore
)
53 QLIST_FOREACH_SAFE(c
, &bs
->parents
, next_parent
, next
) {
57 bdrv_parent_drained_begin_single(c
);
61 void bdrv_parent_drained_end_single(BdrvChild
*c
)
65 assert(c
->quiesced_parent
);
66 c
->quiesced_parent
= false;
68 if (c
->klass
->drained_end
) {
69 c
->klass
->drained_end(c
);
73 static void bdrv_parent_drained_end(BlockDriverState
*bs
, BdrvChild
*ignore
)
77 QLIST_FOREACH(c
, &bs
->parents
, next_parent
) {
81 bdrv_parent_drained_end_single(c
);
85 bool bdrv_parent_drained_poll_single(BdrvChild
*c
)
87 if (c
->klass
->drained_poll
) {
88 return c
->klass
->drained_poll(c
);
93 static bool bdrv_parent_drained_poll(BlockDriverState
*bs
, BdrvChild
*ignore
,
94 bool ignore_bds_parents
)
99 QLIST_FOREACH_SAFE(c
, &bs
->parents
, next_parent
, next
) {
100 if (c
== ignore
|| (ignore_bds_parents
&& c
->klass
->parent_is_bds
)) {
103 busy
|= bdrv_parent_drained_poll_single(c
);
109 void bdrv_parent_drained_begin_single(BdrvChild
*c
)
113 assert(!c
->quiesced_parent
);
114 c
->quiesced_parent
= true;
116 if (c
->klass
->drained_begin
) {
117 c
->klass
->drained_begin(c
);
121 static void bdrv_merge_limits(BlockLimits
*dst
, const BlockLimits
*src
)
123 dst
->pdiscard_alignment
= MAX(dst
->pdiscard_alignment
,
124 src
->pdiscard_alignment
);
125 dst
->opt_transfer
= MAX(dst
->opt_transfer
, src
->opt_transfer
);
126 dst
->max_transfer
= MIN_NON_ZERO(dst
->max_transfer
, src
->max_transfer
);
127 dst
->max_hw_transfer
= MIN_NON_ZERO(dst
->max_hw_transfer
,
128 src
->max_hw_transfer
);
129 dst
->opt_mem_alignment
= MAX(dst
->opt_mem_alignment
,
130 src
->opt_mem_alignment
);
131 dst
->min_mem_alignment
= MAX(dst
->min_mem_alignment
,
132 src
->min_mem_alignment
);
133 dst
->max_iov
= MIN_NON_ZERO(dst
->max_iov
, src
->max_iov
);
134 dst
->max_hw_iov
= MIN_NON_ZERO(dst
->max_hw_iov
, src
->max_hw_iov
);
137 typedef struct BdrvRefreshLimitsState
{
138 BlockDriverState
*bs
;
140 } BdrvRefreshLimitsState
;
142 static void bdrv_refresh_limits_abort(void *opaque
)
144 BdrvRefreshLimitsState
*s
= opaque
;
146 s
->bs
->bl
= s
->old_bl
;
149 static TransactionActionDrv bdrv_refresh_limits_drv
= {
150 .abort
= bdrv_refresh_limits_abort
,
154 /* @tran is allowed to be NULL, in this case no rollback is possible. */
155 void bdrv_refresh_limits(BlockDriverState
*bs
, Transaction
*tran
, Error
**errp
)
158 BlockDriver
*drv
= bs
->drv
;
165 BdrvRefreshLimitsState
*s
= g_new(BdrvRefreshLimitsState
, 1);
166 *s
= (BdrvRefreshLimitsState
) {
170 tran_add(tran
, &bdrv_refresh_limits_drv
, s
);
173 memset(&bs
->bl
, 0, sizeof(bs
->bl
));
179 /* Default alignment based on whether driver has byte interface */
180 bs
->bl
.request_alignment
= (drv
->bdrv_co_preadv
||
181 drv
->bdrv_aio_preadv
||
182 drv
->bdrv_co_preadv_part
) ? 1 : 512;
184 /* Take some limits from the children as a default */
186 QLIST_FOREACH(c
, &bs
->children
, next
) {
187 if (c
->role
& (BDRV_CHILD_DATA
| BDRV_CHILD_FILTERED
| BDRV_CHILD_COW
))
189 bdrv_merge_limits(&bs
->bl
, &c
->bs
->bl
);
193 if (c
->role
& BDRV_CHILD_FILTERED
) {
194 bs
->bl
.has_variable_length
|= c
->bs
->bl
.has_variable_length
;
199 bs
->bl
.min_mem_alignment
= 512;
200 bs
->bl
.opt_mem_alignment
= qemu_real_host_page_size();
202 /* Safe default since most protocols use readv()/writev()/etc */
203 bs
->bl
.max_iov
= IOV_MAX
;
206 /* Then let the driver override it */
207 if (drv
->bdrv_refresh_limits
) {
208 drv
->bdrv_refresh_limits(bs
, errp
);
214 if (bs
->bl
.request_alignment
> BDRV_MAX_ALIGNMENT
) {
215 error_setg(errp
, "Driver requires too large request alignment");
220 * The copy-on-read flag is actually a reference count so multiple users may
221 * use the feature without worrying about clobbering its previous state.
222 * Copy-on-read stays enabled until all users have called to disable it.
224 void bdrv_enable_copy_on_read(BlockDriverState
*bs
)
227 qatomic_inc(&bs
->copy_on_read
);
230 void bdrv_disable_copy_on_read(BlockDriverState
*bs
)
232 int old
= qatomic_fetch_dec(&bs
->copy_on_read
);
239 BlockDriverState
*bs
;
246 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
247 bool bdrv_drain_poll(BlockDriverState
*bs
, BdrvChild
*ignore_parent
,
248 bool ignore_bds_parents
)
252 if (bdrv_parent_drained_poll(bs
, ignore_parent
, ignore_bds_parents
)) {
256 if (qatomic_read(&bs
->in_flight
)) {
263 static bool bdrv_drain_poll_top_level(BlockDriverState
*bs
,
264 BdrvChild
*ignore_parent
)
266 return bdrv_drain_poll(bs
, ignore_parent
, false);
269 static void bdrv_do_drained_begin(BlockDriverState
*bs
, BdrvChild
*parent
,
271 static void bdrv_do_drained_end(BlockDriverState
*bs
, BdrvChild
*parent
);
273 static void bdrv_co_drain_bh_cb(void *opaque
)
275 BdrvCoDrainData
*data
= opaque
;
276 Coroutine
*co
= data
->co
;
277 BlockDriverState
*bs
= data
->bs
;
280 AioContext
*ctx
= bdrv_get_aio_context(bs
);
281 aio_context_acquire(ctx
);
282 bdrv_dec_in_flight(bs
);
284 bdrv_do_drained_begin(bs
, data
->parent
, data
->poll
);
287 bdrv_do_drained_end(bs
, data
->parent
);
289 aio_context_release(ctx
);
292 bdrv_drain_all_begin();
299 static void coroutine_fn
bdrv_co_yield_to_drain(BlockDriverState
*bs
,
304 BdrvCoDrainData data
;
305 Coroutine
*self
= qemu_coroutine_self();
306 AioContext
*ctx
= bdrv_get_aio_context(bs
);
307 AioContext
*co_ctx
= qemu_coroutine_get_aio_context(self
);
309 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
310 * other coroutines run if they were queued by aio_co_enter(). */
312 assert(qemu_in_coroutine());
313 data
= (BdrvCoDrainData
) {
323 bdrv_inc_in_flight(bs
);
327 * Temporarily drop the lock across yield or we would get deadlocks.
328 * bdrv_co_drain_bh_cb() reaquires the lock as needed.
330 * When we yield below, the lock for the current context will be
331 * released, so if this is actually the lock that protects bs, don't drop
335 aio_context_release(ctx
);
337 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
338 bdrv_co_drain_bh_cb
, &data
);
340 qemu_coroutine_yield();
341 /* If we are resumed from some other event (such as an aio completion or a
342 * timer callback), it is a bug in the caller that should be fixed. */
345 /* Reacquire the AioContext of bs if we dropped it */
347 aio_context_acquire(ctx
);
351 static void bdrv_do_drained_begin(BlockDriverState
*bs
, BdrvChild
*parent
,
356 if (qemu_in_coroutine()) {
357 bdrv_co_yield_to_drain(bs
, true, parent
, poll
);
363 /* Stop things in parent-to-child order */
364 if (qatomic_fetch_inc(&bs
->quiesce_counter
) == 0) {
365 bdrv_parent_drained_begin(bs
, parent
);
366 if (bs
->drv
&& bs
->drv
->bdrv_drain_begin
) {
367 bs
->drv
->bdrv_drain_begin(bs
);
372 * Wait for drained requests to finish.
374 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
375 * call is needed so things in this AioContext can make progress even
376 * though we don't return to the main AioContext loop - this automatically
377 * includes other nodes in the same AioContext and therefore all child
381 BDRV_POLL_WHILE(bs
, bdrv_drain_poll_top_level(bs
, parent
));
385 void bdrv_do_drained_begin_quiesce(BlockDriverState
*bs
, BdrvChild
*parent
)
387 bdrv_do_drained_begin(bs
, parent
, false);
390 void bdrv_drained_begin(BlockDriverState
*bs
)
393 bdrv_do_drained_begin(bs
, NULL
, true);
397 * This function does not poll, nor must any of its recursively called
400 static void bdrv_do_drained_end(BlockDriverState
*bs
, BdrvChild
*parent
)
402 int old_quiesce_counter
;
406 if (qemu_in_coroutine()) {
407 bdrv_co_yield_to_drain(bs
, false, parent
, false);
410 assert(bs
->quiesce_counter
> 0);
413 /* Re-enable things in child-to-parent order */
414 old_quiesce_counter
= qatomic_fetch_dec(&bs
->quiesce_counter
);
415 if (old_quiesce_counter
== 1) {
416 if (bs
->drv
&& bs
->drv
->bdrv_drain_end
) {
417 bs
->drv
->bdrv_drain_end(bs
);
419 bdrv_parent_drained_end(bs
, parent
);
423 void bdrv_drained_end(BlockDriverState
*bs
)
426 bdrv_do_drained_end(bs
, NULL
);
429 void bdrv_drain(BlockDriverState
*bs
)
432 bdrv_drained_begin(bs
);
433 bdrv_drained_end(bs
);
436 static void bdrv_drain_assert_idle(BlockDriverState
*bs
)
438 BdrvChild
*child
, *next
;
440 assert(qatomic_read(&bs
->in_flight
) == 0);
441 QLIST_FOREACH_SAFE(child
, &bs
->children
, next
, next
) {
442 bdrv_drain_assert_idle(child
->bs
);
446 unsigned int bdrv_drain_all_count
= 0;
448 static bool bdrv_drain_all_poll(void)
450 BlockDriverState
*bs
= NULL
;
454 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
455 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
456 while ((bs
= bdrv_next_all_states(bs
))) {
457 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
458 aio_context_acquire(aio_context
);
459 result
|= bdrv_drain_poll(bs
, NULL
, true);
460 aio_context_release(aio_context
);
467 * Wait for pending requests to complete across all BlockDriverStates
469 * This function does not flush data to disk, use bdrv_flush_all() for that
470 * after calling this function.
472 * This pauses all block jobs and disables external clients. It must
473 * be paired with bdrv_drain_all_end().
475 * NOTE: no new block jobs or BlockDriverStates can be created between
476 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
478 void bdrv_drain_all_begin_nopoll(void)
480 BlockDriverState
*bs
= NULL
;
484 * bdrv queue is managed by record/replay,
485 * waiting for finishing the I/O requests may
488 if (replay_events_enabled()) {
492 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
493 * loop AioContext, so make sure we're in the main context. */
494 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
495 assert(bdrv_drain_all_count
< INT_MAX
);
496 bdrv_drain_all_count
++;
498 /* Quiesce all nodes, without polling in-flight requests yet. The graph
499 * cannot change during this loop. */
500 while ((bs
= bdrv_next_all_states(bs
))) {
501 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
503 aio_context_acquire(aio_context
);
504 bdrv_do_drained_begin(bs
, NULL
, false);
505 aio_context_release(aio_context
);
509 void bdrv_drain_all_begin(void)
511 BlockDriverState
*bs
= NULL
;
513 if (qemu_in_coroutine()) {
514 bdrv_co_yield_to_drain(NULL
, true, NULL
, true);
519 * bdrv queue is managed by record/replay,
520 * waiting for finishing the I/O requests may
523 if (replay_events_enabled()) {
527 bdrv_drain_all_begin_nopoll();
529 /* Now poll the in-flight requests */
530 AIO_WAIT_WHILE_UNLOCKED(NULL
, bdrv_drain_all_poll());
532 while ((bs
= bdrv_next_all_states(bs
))) {
533 bdrv_drain_assert_idle(bs
);
537 void bdrv_drain_all_end_quiesce(BlockDriverState
*bs
)
541 g_assert(bs
->quiesce_counter
> 0);
542 g_assert(!bs
->refcnt
);
544 while (bs
->quiesce_counter
) {
545 bdrv_do_drained_end(bs
, NULL
);
549 void bdrv_drain_all_end(void)
551 BlockDriverState
*bs
= NULL
;
555 * bdrv queue is managed by record/replay,
556 * waiting for finishing the I/O requests may
559 if (replay_events_enabled()) {
563 while ((bs
= bdrv_next_all_states(bs
))) {
564 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
566 aio_context_acquire(aio_context
);
567 bdrv_do_drained_end(bs
, NULL
);
568 aio_context_release(aio_context
);
571 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
572 assert(bdrv_drain_all_count
> 0);
573 bdrv_drain_all_count
--;
576 void bdrv_drain_all(void)
579 bdrv_drain_all_begin();
580 bdrv_drain_all_end();
584 * Remove an active request from the tracked requests list
586 * This function should be called when a tracked request is completing.
588 static void coroutine_fn
tracked_request_end(BdrvTrackedRequest
*req
)
590 if (req
->serialising
) {
591 qatomic_dec(&req
->bs
->serialising_in_flight
);
594 qemu_mutex_lock(&req
->bs
->reqs_lock
);
595 QLIST_REMOVE(req
, list
);
596 qemu_mutex_unlock(&req
->bs
->reqs_lock
);
599 * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
600 * anymore because the request has been removed from the list, so it's safe
601 * to restart the queue outside reqs_lock to minimize the critical section.
603 qemu_co_queue_restart_all(&req
->wait_queue
);
607 * Add an active request to the tracked requests list
609 static void coroutine_fn
tracked_request_begin(BdrvTrackedRequest
*req
,
610 BlockDriverState
*bs
,
613 enum BdrvTrackedRequestType type
)
615 bdrv_check_request(offset
, bytes
, &error_abort
);
617 *req
= (BdrvTrackedRequest
){
622 .co
= qemu_coroutine_self(),
623 .serialising
= false,
624 .overlap_offset
= offset
,
625 .overlap_bytes
= bytes
,
628 qemu_co_queue_init(&req
->wait_queue
);
630 qemu_mutex_lock(&bs
->reqs_lock
);
631 QLIST_INSERT_HEAD(&bs
->tracked_requests
, req
, list
);
632 qemu_mutex_unlock(&bs
->reqs_lock
);
635 static bool tracked_request_overlaps(BdrvTrackedRequest
*req
,
636 int64_t offset
, int64_t bytes
)
638 bdrv_check_request(offset
, bytes
, &error_abort
);
641 if (offset
>= req
->overlap_offset
+ req
->overlap_bytes
) {
645 if (req
->overlap_offset
>= offset
+ bytes
) {
651 /* Called with self->bs->reqs_lock held */
652 static coroutine_fn BdrvTrackedRequest
*
653 bdrv_find_conflicting_request(BdrvTrackedRequest
*self
)
655 BdrvTrackedRequest
*req
;
657 QLIST_FOREACH(req
, &self
->bs
->tracked_requests
, list
) {
658 if (req
== self
|| (!req
->serialising
&& !self
->serialising
)) {
661 if (tracked_request_overlaps(req
, self
->overlap_offset
,
662 self
->overlap_bytes
))
665 * Hitting this means there was a reentrant request, for
666 * example, a block driver issuing nested requests. This must
667 * never happen since it means deadlock.
669 assert(qemu_coroutine_self() != req
->co
);
672 * If the request is already (indirectly) waiting for us, or
673 * will wait for us as soon as it wakes up, then just go on
674 * (instead of producing a deadlock in the former case).
676 if (!req
->waiting_for
) {
685 /* Called with self->bs->reqs_lock held */
686 static void coroutine_fn
687 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest
*self
)
689 BdrvTrackedRequest
*req
;
691 while ((req
= bdrv_find_conflicting_request(self
))) {
692 self
->waiting_for
= req
;
693 qemu_co_queue_wait(&req
->wait_queue
, &self
->bs
->reqs_lock
);
694 self
->waiting_for
= NULL
;
698 /* Called with req->bs->reqs_lock held */
699 static void tracked_request_set_serialising(BdrvTrackedRequest
*req
,
702 int64_t overlap_offset
= req
->offset
& ~(align
- 1);
703 int64_t overlap_bytes
=
704 ROUND_UP(req
->offset
+ req
->bytes
, align
) - overlap_offset
;
706 bdrv_check_request(req
->offset
, req
->bytes
, &error_abort
);
708 if (!req
->serialising
) {
709 qatomic_inc(&req
->bs
->serialising_in_flight
);
710 req
->serialising
= true;
713 req
->overlap_offset
= MIN(req
->overlap_offset
, overlap_offset
);
714 req
->overlap_bytes
= MAX(req
->overlap_bytes
, overlap_bytes
);
718 * Return the tracked request on @bs for the current coroutine, or
719 * NULL if there is none.
721 BdrvTrackedRequest
*coroutine_fn
bdrv_co_get_self_request(BlockDriverState
*bs
)
723 BdrvTrackedRequest
*req
;
724 Coroutine
*self
= qemu_coroutine_self();
727 QLIST_FOREACH(req
, &bs
->tracked_requests
, list
) {
728 if (req
->co
== self
) {
737 * Round a region to subcluster (if supported) or cluster boundaries
739 void coroutine_fn GRAPH_RDLOCK
740 bdrv_round_to_subclusters(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
741 int64_t *align_offset
, int64_t *align_bytes
)
745 if (bdrv_co_get_info(bs
, &bdi
) < 0 || bdi
.subcluster_size
== 0) {
746 *align_offset
= offset
;
747 *align_bytes
= bytes
;
749 int64_t c
= bdi
.subcluster_size
;
750 *align_offset
= QEMU_ALIGN_DOWN(offset
, c
);
751 *align_bytes
= QEMU_ALIGN_UP(offset
- *align_offset
+ bytes
, c
);
755 static int coroutine_fn GRAPH_RDLOCK
bdrv_get_cluster_size(BlockDriverState
*bs
)
760 ret
= bdrv_co_get_info(bs
, &bdi
);
761 if (ret
< 0 || bdi
.cluster_size
== 0) {
762 return bs
->bl
.request_alignment
;
764 return bdi
.cluster_size
;
768 void bdrv_inc_in_flight(BlockDriverState
*bs
)
771 qatomic_inc(&bs
->in_flight
);
774 void bdrv_wakeup(BlockDriverState
*bs
)
780 void bdrv_dec_in_flight(BlockDriverState
*bs
)
783 qatomic_dec(&bs
->in_flight
);
787 static void coroutine_fn
788 bdrv_wait_serialising_requests(BdrvTrackedRequest
*self
)
790 BlockDriverState
*bs
= self
->bs
;
792 if (!qatomic_read(&bs
->serialising_in_flight
)) {
796 qemu_mutex_lock(&bs
->reqs_lock
);
797 bdrv_wait_serialising_requests_locked(self
);
798 qemu_mutex_unlock(&bs
->reqs_lock
);
801 void coroutine_fn
bdrv_make_request_serialising(BdrvTrackedRequest
*req
,
806 qemu_mutex_lock(&req
->bs
->reqs_lock
);
808 tracked_request_set_serialising(req
, align
);
809 bdrv_wait_serialising_requests_locked(req
);
811 qemu_mutex_unlock(&req
->bs
->reqs_lock
);
814 int bdrv_check_qiov_request(int64_t offset
, int64_t bytes
,
815 QEMUIOVector
*qiov
, size_t qiov_offset
,
819 * Check generic offset/bytes correctness
823 error_setg(errp
, "offset is negative: %" PRIi64
, offset
);
828 error_setg(errp
, "bytes is negative: %" PRIi64
, bytes
);
832 if (bytes
> BDRV_MAX_LENGTH
) {
833 error_setg(errp
, "bytes(%" PRIi64
") exceeds maximum(%" PRIi64
")",
834 bytes
, BDRV_MAX_LENGTH
);
838 if (offset
> BDRV_MAX_LENGTH
) {
839 error_setg(errp
, "offset(%" PRIi64
") exceeds maximum(%" PRIi64
")",
840 offset
, BDRV_MAX_LENGTH
);
844 if (offset
> BDRV_MAX_LENGTH
- bytes
) {
845 error_setg(errp
, "sum of offset(%" PRIi64
") and bytes(%" PRIi64
") "
846 "exceeds maximum(%" PRIi64
")", offset
, bytes
,
856 * Check qiov and qiov_offset
859 if (qiov_offset
> qiov
->size
) {
860 error_setg(errp
, "qiov_offset(%zu) overflow io vector size(%zu)",
861 qiov_offset
, qiov
->size
);
865 if (bytes
> qiov
->size
- qiov_offset
) {
866 error_setg(errp
, "bytes(%" PRIi64
") + qiov_offset(%zu) overflow io "
867 "vector size(%zu)", bytes
, qiov_offset
, qiov
->size
);
874 int bdrv_check_request(int64_t offset
, int64_t bytes
, Error
**errp
)
876 return bdrv_check_qiov_request(offset
, bytes
, NULL
, 0, errp
);
879 static int bdrv_check_request32(int64_t offset
, int64_t bytes
,
880 QEMUIOVector
*qiov
, size_t qiov_offset
)
882 int ret
= bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, NULL
);
887 if (bytes
> BDRV_REQUEST_MAX_BYTES
) {
895 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
896 * The operation is sped up by checking the block status and only writing
897 * zeroes to the device if they currently do not return zeroes. Optional
898 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
901 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
903 int bdrv_make_zero(BdrvChild
*child
, BdrvRequestFlags flags
)
906 int64_t target_size
, bytes
, offset
= 0;
907 BlockDriverState
*bs
= child
->bs
;
910 target_size
= bdrv_getlength(bs
);
911 if (target_size
< 0) {
916 bytes
= MIN(target_size
- offset
, BDRV_REQUEST_MAX_BYTES
);
920 ret
= bdrv_block_status(bs
, offset
, bytes
, &bytes
, NULL
, NULL
);
924 if (ret
& BDRV_BLOCK_ZERO
) {
928 ret
= bdrv_pwrite_zeroes(child
, offset
, bytes
, flags
);
937 * Writes to the file and ensures that no writes are reordered across this
938 * request (acts as a barrier)
940 * Returns 0 on success, -errno in error cases.
942 int coroutine_fn
bdrv_co_pwrite_sync(BdrvChild
*child
, int64_t offset
,
943 int64_t bytes
, const void *buf
,
944 BdrvRequestFlags flags
)
948 assert_bdrv_graph_readable();
950 ret
= bdrv_co_pwrite(child
, offset
, bytes
, buf
, flags
);
955 ret
= bdrv_co_flush(child
->bs
);
963 typedef struct CoroutineIOCompletion
{
964 Coroutine
*coroutine
;
966 } CoroutineIOCompletion
;
968 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
970 CoroutineIOCompletion
*co
= opaque
;
973 aio_co_wake(co
->coroutine
);
976 static int coroutine_fn GRAPH_RDLOCK
977 bdrv_driver_preadv(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
978 QEMUIOVector
*qiov
, size_t qiov_offset
, int flags
)
980 BlockDriver
*drv
= bs
->drv
;
982 unsigned int nb_sectors
;
983 QEMUIOVector local_qiov
;
985 assert_bdrv_graph_readable();
987 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
988 assert(!(flags
& ~bs
->supported_read_flags
));
994 if (drv
->bdrv_co_preadv_part
) {
995 return drv
->bdrv_co_preadv_part(bs
, offset
, bytes
, qiov
, qiov_offset
,
999 if (qiov_offset
> 0 || bytes
!= qiov
->size
) {
1000 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1004 if (drv
->bdrv_co_preadv
) {
1005 ret
= drv
->bdrv_co_preadv(bs
, offset
, bytes
, qiov
, flags
);
1009 if (drv
->bdrv_aio_preadv
) {
1011 CoroutineIOCompletion co
= {
1012 .coroutine
= qemu_coroutine_self(),
1015 acb
= drv
->bdrv_aio_preadv(bs
, offset
, bytes
, qiov
, flags
,
1016 bdrv_co_io_em_complete
, &co
);
1021 qemu_coroutine_yield();
1027 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1028 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1030 assert(QEMU_IS_ALIGNED(offset
, BDRV_SECTOR_SIZE
));
1031 assert(QEMU_IS_ALIGNED(bytes
, BDRV_SECTOR_SIZE
));
1032 assert(bytes
<= BDRV_REQUEST_MAX_BYTES
);
1033 assert(drv
->bdrv_co_readv
);
1035 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
1038 if (qiov
== &local_qiov
) {
1039 qemu_iovec_destroy(&local_qiov
);
1045 static int coroutine_fn GRAPH_RDLOCK
1046 bdrv_driver_pwritev(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
1047 QEMUIOVector
*qiov
, size_t qiov_offset
,
1048 BdrvRequestFlags flags
)
1050 BlockDriver
*drv
= bs
->drv
;
1051 bool emulate_fua
= false;
1053 unsigned int nb_sectors
;
1054 QEMUIOVector local_qiov
;
1056 assert_bdrv_graph_readable();
1058 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
1064 if ((flags
& BDRV_REQ_FUA
) &&
1065 (~bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1066 flags
&= ~BDRV_REQ_FUA
;
1070 flags
&= bs
->supported_write_flags
;
1072 if (drv
->bdrv_co_pwritev_part
) {
1073 ret
= drv
->bdrv_co_pwritev_part(bs
, offset
, bytes
, qiov
, qiov_offset
,
1078 if (qiov_offset
> 0 || bytes
!= qiov
->size
) {
1079 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1083 if (drv
->bdrv_co_pwritev
) {
1084 ret
= drv
->bdrv_co_pwritev(bs
, offset
, bytes
, qiov
, flags
);
1088 if (drv
->bdrv_aio_pwritev
) {
1090 CoroutineIOCompletion co
= {
1091 .coroutine
= qemu_coroutine_self(),
1094 acb
= drv
->bdrv_aio_pwritev(bs
, offset
, bytes
, qiov
, flags
,
1095 bdrv_co_io_em_complete
, &co
);
1099 qemu_coroutine_yield();
1105 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1106 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1108 assert(QEMU_IS_ALIGNED(offset
, BDRV_SECTOR_SIZE
));
1109 assert(QEMU_IS_ALIGNED(bytes
, BDRV_SECTOR_SIZE
));
1110 assert(bytes
<= BDRV_REQUEST_MAX_BYTES
);
1112 assert(drv
->bdrv_co_writev
);
1113 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
, flags
);
1116 if (ret
== 0 && emulate_fua
) {
1117 ret
= bdrv_co_flush(bs
);
1120 if (qiov
== &local_qiov
) {
1121 qemu_iovec_destroy(&local_qiov
);
1127 static int coroutine_fn GRAPH_RDLOCK
1128 bdrv_driver_pwritev_compressed(BlockDriverState
*bs
, int64_t offset
,
1129 int64_t bytes
, QEMUIOVector
*qiov
,
1132 BlockDriver
*drv
= bs
->drv
;
1133 QEMUIOVector local_qiov
;
1135 assert_bdrv_graph_readable();
1137 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
1143 if (!block_driver_can_compress(drv
)) {
1147 if (drv
->bdrv_co_pwritev_compressed_part
) {
1148 return drv
->bdrv_co_pwritev_compressed_part(bs
, offset
, bytes
,
1152 if (qiov_offset
== 0) {
1153 return drv
->bdrv_co_pwritev_compressed(bs
, offset
, bytes
, qiov
);
1156 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1157 ret
= drv
->bdrv_co_pwritev_compressed(bs
, offset
, bytes
, &local_qiov
);
1158 qemu_iovec_destroy(&local_qiov
);
1163 static int coroutine_fn GRAPH_RDLOCK
1164 bdrv_co_do_copy_on_readv(BdrvChild
*child
, int64_t offset
, int64_t bytes
,
1165 QEMUIOVector
*qiov
, size_t qiov_offset
, int flags
)
1167 BlockDriverState
*bs
= child
->bs
;
1169 /* Perform I/O through a temporary buffer so that users who scribble over
1170 * their read buffer while the operation is in progress do not end up
1171 * modifying the image file. This is critical for zero-copy guest I/O
1172 * where anything might happen inside guest memory.
1174 void *bounce_buffer
= NULL
;
1176 BlockDriver
*drv
= bs
->drv
;
1177 int64_t align_offset
;
1178 int64_t align_bytes
;
1181 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
,
1182 BDRV_REQUEST_MAX_BYTES
);
1183 int64_t progress
= 0;
1186 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
1193 * Do not write anything when the BDS is inactive. That is not
1194 * allowed, and it would not help.
1196 skip_write
= (bs
->open_flags
& BDRV_O_INACTIVE
);
1198 /* FIXME We cannot require callers to have write permissions when all they
1199 * are doing is a read request. If we did things right, write permissions
1200 * would be obtained anyway, but internally by the copy-on-read code. As
1201 * long as it is implemented here rather than in a separate filter driver,
1202 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1203 * it could request permissions. Therefore we have to bypass the permission
1204 * system for the moment. */
1205 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1207 /* Cover entire cluster so no additional backing file I/O is required when
1208 * allocating cluster in the image file. Note that this value may exceed
1209 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1210 * is one reason we loop rather than doing it all at once.
1212 bdrv_round_to_subclusters(bs
, offset
, bytes
, &align_offset
, &align_bytes
);
1213 skip_bytes
= offset
- align_offset
;
1215 trace_bdrv_co_do_copy_on_readv(bs
, offset
, bytes
,
1216 align_offset
, align_bytes
);
1218 while (align_bytes
) {
1222 ret
= 1; /* "already allocated", so nothing will be copied */
1223 pnum
= MIN(align_bytes
, max_transfer
);
1225 ret
= bdrv_is_allocated(bs
, align_offset
,
1226 MIN(align_bytes
, max_transfer
), &pnum
);
1229 * Safe to treat errors in querying allocation as if
1230 * unallocated; we'll probably fail again soon on the
1231 * read, but at least that will set a decent errno.
1233 pnum
= MIN(align_bytes
, max_transfer
);
1236 /* Stop at EOF if the image ends in the middle of the cluster */
1237 if (ret
== 0 && pnum
== 0) {
1238 assert(progress
>= bytes
);
1242 assert(skip_bytes
< pnum
);
1246 QEMUIOVector local_qiov
;
1248 /* Must copy-on-read; use the bounce buffer */
1249 pnum
= MIN(pnum
, MAX_BOUNCE_BUFFER
);
1250 if (!bounce_buffer
) {
1251 int64_t max_we_need
= MAX(pnum
, align_bytes
- pnum
);
1252 int64_t max_allowed
= MIN(max_transfer
, MAX_BOUNCE_BUFFER
);
1253 int64_t bounce_buffer_len
= MIN(max_we_need
, max_allowed
);
1255 bounce_buffer
= qemu_try_blockalign(bs
, bounce_buffer_len
);
1256 if (!bounce_buffer
) {
1261 qemu_iovec_init_buf(&local_qiov
, bounce_buffer
, pnum
);
1263 ret
= bdrv_driver_preadv(bs
, align_offset
, pnum
,
1269 bdrv_co_debug_event(bs
, BLKDBG_COR_WRITE
);
1270 if (drv
->bdrv_co_pwrite_zeroes
&&
1271 buffer_is_zero(bounce_buffer
, pnum
)) {
1272 /* FIXME: Should we (perhaps conditionally) be setting
1273 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1274 * that still correctly reads as zero? */
1275 ret
= bdrv_co_do_pwrite_zeroes(bs
, align_offset
, pnum
,
1276 BDRV_REQ_WRITE_UNCHANGED
);
1278 /* This does not change the data on the disk, it is not
1279 * necessary to flush even in cache=writethrough mode.
1281 ret
= bdrv_driver_pwritev(bs
, align_offset
, pnum
,
1283 BDRV_REQ_WRITE_UNCHANGED
);
1287 /* It might be okay to ignore write errors for guest
1288 * requests. If this is a deliberate copy-on-read
1289 * then we don't want to ignore the error. Simply
1290 * report it in all cases.
1295 if (!(flags
& BDRV_REQ_PREFETCH
)) {
1296 qemu_iovec_from_buf(qiov
, qiov_offset
+ progress
,
1297 bounce_buffer
+ skip_bytes
,
1298 MIN(pnum
- skip_bytes
, bytes
- progress
));
1300 } else if (!(flags
& BDRV_REQ_PREFETCH
)) {
1301 /* Read directly into the destination */
1302 ret
= bdrv_driver_preadv(bs
, offset
+ progress
,
1303 MIN(pnum
- skip_bytes
, bytes
- progress
),
1304 qiov
, qiov_offset
+ progress
, 0);
1310 align_offset
+= pnum
;
1311 align_bytes
-= pnum
;
1312 progress
+= pnum
- skip_bytes
;
1318 qemu_vfree(bounce_buffer
);
1323 * Forwards an already correctly aligned request to the BlockDriver. This
1324 * handles copy on read, zeroing after EOF, and fragmentation of large
1325 * reads; any other features must be implemented by the caller.
1327 static int coroutine_fn GRAPH_RDLOCK
1328 bdrv_aligned_preadv(BdrvChild
*child
, BdrvTrackedRequest
*req
,
1329 int64_t offset
, int64_t bytes
, int64_t align
,
1330 QEMUIOVector
*qiov
, size_t qiov_offset
, int flags
)
1332 BlockDriverState
*bs
= child
->bs
;
1333 int64_t total_bytes
, max_bytes
;
1335 int64_t bytes_remaining
= bytes
;
1338 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
1339 assert(is_power_of_2(align
));
1340 assert((offset
& (align
- 1)) == 0);
1341 assert((bytes
& (align
- 1)) == 0);
1342 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1343 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
1347 * TODO: We would need a per-BDS .supported_read_flags and
1348 * potential fallback support, if we ever implement any read flags
1349 * to pass through to drivers. For now, there aren't any
1350 * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1352 assert(!(flags
& ~(BDRV_REQ_COPY_ON_READ
| BDRV_REQ_PREFETCH
|
1353 BDRV_REQ_REGISTERED_BUF
)));
1355 /* Handle Copy on Read and associated serialisation */
1356 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1357 /* If we touch the same cluster it counts as an overlap. This
1358 * guarantees that allocating writes will be serialized and not race
1359 * with each other for the same cluster. For example, in copy-on-read
1360 * it ensures that the CoR read and write operations are atomic and
1361 * guest writes cannot interleave between them. */
1362 bdrv_make_request_serialising(req
, bdrv_get_cluster_size(bs
));
1364 bdrv_wait_serialising_requests(req
);
1367 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1370 /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1371 flags
&= ~BDRV_REQ_COPY_ON_READ
;
1373 ret
= bdrv_is_allocated(bs
, offset
, bytes
, &pnum
);
1378 if (!ret
|| pnum
!= bytes
) {
1379 ret
= bdrv_co_do_copy_on_readv(child
, offset
, bytes
,
1380 qiov
, qiov_offset
, flags
);
1382 } else if (flags
& BDRV_REQ_PREFETCH
) {
1387 /* Forward the request to the BlockDriver, possibly fragmenting it */
1388 total_bytes
= bdrv_co_getlength(bs
);
1389 if (total_bytes
< 0) {
1394 assert(!(flags
& ~(bs
->supported_read_flags
| BDRV_REQ_REGISTERED_BUF
)));
1396 max_bytes
= ROUND_UP(MAX(0, total_bytes
- offset
), align
);
1397 if (bytes
<= max_bytes
&& bytes
<= max_transfer
) {
1398 ret
= bdrv_driver_preadv(bs
, offset
, bytes
, qiov
, qiov_offset
, flags
);
1402 while (bytes_remaining
) {
1406 num
= MIN(bytes_remaining
, MIN(max_bytes
, max_transfer
));
1409 ret
= bdrv_driver_preadv(bs
, offset
+ bytes
- bytes_remaining
,
1411 qiov_offset
+ bytes
- bytes_remaining
,
1415 num
= bytes_remaining
;
1416 ret
= qemu_iovec_memset(qiov
, qiov_offset
+ bytes
- bytes_remaining
,
1417 0, bytes_remaining
);
1422 bytes_remaining
-= num
;
1426 return ret
< 0 ? ret
: 0;
1432 * |<---- align ----->| |<----- align ---->|
1433 * |<- head ->|<------------- bytes ------------->|<-- tail -->|
1435 * -*----------$-------*-------- ... --------*-----$------------*---
1437 * | offset | | end |
1438 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end)
1439 * [buf ... ) [tail_buf )
1441 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
1442 * is placed at the beginning of @buf and @tail at the @end.
1444 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
1445 * around tail, if tail exists.
1447 * @merge_reads is true for small requests,
1448 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
1449 * head and tail exist but @buf_len == align and @tail_buf == @buf.
1451 * @write is true for write requests, false for read requests.
1453 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
1454 * merge existing vector elements into a single one. @collapse_bounce_buf acts
1455 * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
1456 * I/O vector elements so for read requests, the data can be copied back after
1459 typedef struct BdrvRequestPadding
{
1467 QEMUIOVector local_qiov
;
1469 uint8_t *collapse_bounce_buf
;
1470 size_t collapse_len
;
1471 QEMUIOVector pre_collapse_qiov
;
1472 } BdrvRequestPadding
;
1474 static bool bdrv_init_padding(BlockDriverState
*bs
,
1475 int64_t offset
, int64_t bytes
,
1477 BdrvRequestPadding
*pad
)
1479 int64_t align
= bs
->bl
.request_alignment
;
1482 bdrv_check_request(offset
, bytes
, &error_abort
);
1483 assert(align
<= INT_MAX
); /* documented in block/block_int.h */
1484 assert(align
<= SIZE_MAX
/ 2); /* so we can allocate the buffer */
1486 memset(pad
, 0, sizeof(*pad
));
1488 pad
->head
= offset
& (align
- 1);
1489 pad
->tail
= ((offset
+ bytes
) & (align
- 1));
1491 pad
->tail
= align
- pad
->tail
;
1494 if (!pad
->head
&& !pad
->tail
) {
1498 assert(bytes
); /* Nothing good in aligning zero-length requests */
1500 sum
= pad
->head
+ bytes
+ pad
->tail
;
1501 pad
->buf_len
= (sum
> align
&& pad
->head
&& pad
->tail
) ? 2 * align
: align
;
1502 pad
->buf
= qemu_blockalign(bs
, pad
->buf_len
);
1503 pad
->merge_reads
= sum
== pad
->buf_len
;
1505 pad
->tail_buf
= pad
->buf
+ pad
->buf_len
- align
;
1513 static int coroutine_fn GRAPH_RDLOCK
1514 bdrv_padding_rmw_read(BdrvChild
*child
, BdrvTrackedRequest
*req
,
1515 BdrvRequestPadding
*pad
, bool zero_middle
)
1517 QEMUIOVector local_qiov
;
1518 BlockDriverState
*bs
= child
->bs
;
1519 uint64_t align
= bs
->bl
.request_alignment
;
1522 assert(req
->serialising
&& pad
->buf
);
1524 if (pad
->head
|| pad
->merge_reads
) {
1525 int64_t bytes
= pad
->merge_reads
? pad
->buf_len
: align
;
1527 qemu_iovec_init_buf(&local_qiov
, pad
->buf
, bytes
);
1530 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1532 if (pad
->merge_reads
&& pad
->tail
) {
1533 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1535 ret
= bdrv_aligned_preadv(child
, req
, req
->overlap_offset
, bytes
,
1536 align
, &local_qiov
, 0, 0);
1541 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1543 if (pad
->merge_reads
&& pad
->tail
) {
1544 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1547 if (pad
->merge_reads
) {
1553 qemu_iovec_init_buf(&local_qiov
, pad
->tail_buf
, align
);
1555 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1556 ret
= bdrv_aligned_preadv(
1558 req
->overlap_offset
+ req
->overlap_bytes
- align
,
1559 align
, align
, &local_qiov
, 0, 0);
1563 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1568 memset(pad
->buf
+ pad
->head
, 0, pad
->buf_len
- pad
->head
- pad
->tail
);
1575 * Free *pad's associated buffers, and perform any necessary finalization steps.
1577 static void bdrv_padding_finalize(BdrvRequestPadding
*pad
)
1579 if (pad
->collapse_bounce_buf
) {
1582 * If padding required elements in the vector to be collapsed into a
1583 * bounce buffer, copy the bounce buffer content back
1585 qemu_iovec_from_buf(&pad
->pre_collapse_qiov
, 0,
1586 pad
->collapse_bounce_buf
, pad
->collapse_len
);
1588 qemu_vfree(pad
->collapse_bounce_buf
);
1589 qemu_iovec_destroy(&pad
->pre_collapse_qiov
);
1592 qemu_vfree(pad
->buf
);
1593 qemu_iovec_destroy(&pad
->local_qiov
);
1595 memset(pad
, 0, sizeof(*pad
));
1599 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
1600 * ensuring that the resulting vector will not exceed IOV_MAX elements.
1602 * To ensure this, when necessary, the first two or three elements of @iov are
1603 * merged into pad->collapse_bounce_buf and replaced by a reference to that
1604 * bounce buffer in pad->local_qiov.
1606 * After performing a read request, the data from the bounce buffer must be
1607 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
1609 static int bdrv_create_padded_qiov(BlockDriverState
*bs
,
1610 BdrvRequestPadding
*pad
,
1611 struct iovec
*iov
, int niov
,
1612 size_t iov_offset
, size_t bytes
)
1614 int padded_niov
, surplus_count
, collapse_count
;
1616 /* Assert this invariant */
1617 assert(niov
<= IOV_MAX
);
1620 * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error
1621 * to the guest is not ideal, but there is little else we can do. At least
1622 * this will practically never happen on 64-bit systems.
1624 if (SIZE_MAX
- pad
->head
< bytes
||
1625 SIZE_MAX
- pad
->head
- bytes
< pad
->tail
)
1630 /* Length of the resulting IOV if we just concatenated everything */
1631 padded_niov
= !!pad
->head
+ niov
+ !!pad
->tail
;
1633 qemu_iovec_init(&pad
->local_qiov
, MIN(padded_niov
, IOV_MAX
));
1636 qemu_iovec_add(&pad
->local_qiov
, pad
->buf
, pad
->head
);
1640 * If padded_niov > IOV_MAX, we cannot just concatenate everything.
1641 * Instead, merge the first two or three elements of @iov to reduce the
1642 * number of vector elements as necessary.
1644 if (padded_niov
> IOV_MAX
) {
1646 * Only head and tail can have lead to the number of entries exceeding
1647 * IOV_MAX, so we can exceed it by the head and tail at most. We need
1648 * to reduce the number of elements by `surplus_count`, so we merge that
1649 * many elements plus one into one element.
1651 surplus_count
= padded_niov
- IOV_MAX
;
1652 assert(surplus_count
<= !!pad
->head
+ !!pad
->tail
);
1653 collapse_count
= surplus_count
+ 1;
1656 * Move the elements to collapse into `pad->pre_collapse_qiov`, then
1657 * advance `iov` (and associated variables) by those elements.
1659 qemu_iovec_init(&pad
->pre_collapse_qiov
, collapse_count
);
1660 qemu_iovec_concat_iov(&pad
->pre_collapse_qiov
, iov
,
1661 collapse_count
, iov_offset
, SIZE_MAX
);
1662 iov
+= collapse_count
;
1664 niov
-= collapse_count
;
1665 bytes
-= pad
->pre_collapse_qiov
.size
;
1668 * Construct the bounce buffer to match the length of the to-collapse
1669 * vector elements, and for write requests, initialize it with the data
1670 * from those elements. Then add it to `pad->local_qiov`.
1672 pad
->collapse_len
= pad
->pre_collapse_qiov
.size
;
1673 pad
->collapse_bounce_buf
= qemu_blockalign(bs
, pad
->collapse_len
);
1675 qemu_iovec_to_buf(&pad
->pre_collapse_qiov
, 0,
1676 pad
->collapse_bounce_buf
, pad
->collapse_len
);
1678 qemu_iovec_add(&pad
->local_qiov
,
1679 pad
->collapse_bounce_buf
, pad
->collapse_len
);
1682 qemu_iovec_concat_iov(&pad
->local_qiov
, iov
, niov
, iov_offset
, bytes
);
1685 qemu_iovec_add(&pad
->local_qiov
,
1686 pad
->buf
+ pad
->buf_len
- pad
->tail
, pad
->tail
);
1689 assert(pad
->local_qiov
.niov
== MIN(padded_niov
, IOV_MAX
));
1696 * Exchange request parameters with padded request if needed. Don't include RMW
1697 * read of padding, bdrv_padding_rmw_read() should be called separately if
1700 * @write is true for write requests, false for read requests.
1702 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
1703 * - on function start they represent original request
1704 * - on failure or when padding is not needed they are unchanged
1705 * - on success when padding is needed they represent padded request
1707 static int bdrv_pad_request(BlockDriverState
*bs
,
1708 QEMUIOVector
**qiov
, size_t *qiov_offset
,
1709 int64_t *offset
, int64_t *bytes
,
1711 BdrvRequestPadding
*pad
, bool *padded
,
1712 BdrvRequestFlags
*flags
)
1715 struct iovec
*sliced_iov
;
1717 size_t sliced_head
, sliced_tail
;
1719 /* Should have been checked by the caller already */
1720 ret
= bdrv_check_request32(*offset
, *bytes
, *qiov
, *qiov_offset
);
1725 if (!bdrv_init_padding(bs
, *offset
, *bytes
, write
, pad
)) {
1732 sliced_iov
= qemu_iovec_slice(*qiov
, *qiov_offset
, *bytes
,
1733 &sliced_head
, &sliced_tail
,
1736 /* Guaranteed by bdrv_check_request32() */
1737 assert(*bytes
<= SIZE_MAX
);
1738 ret
= bdrv_create_padded_qiov(bs
, pad
, sliced_iov
, sliced_niov
,
1739 sliced_head
, *bytes
);
1741 bdrv_padding_finalize(pad
);
1744 *bytes
+= pad
->head
+ pad
->tail
;
1745 *offset
-= pad
->head
;
1746 *qiov
= &pad
->local_qiov
;
1752 /* Can't use optimization hint with bounce buffer */
1753 *flags
&= ~BDRV_REQ_REGISTERED_BUF
;
1759 int coroutine_fn
bdrv_co_preadv(BdrvChild
*child
,
1760 int64_t offset
, int64_t bytes
, QEMUIOVector
*qiov
,
1761 BdrvRequestFlags flags
)
1764 return bdrv_co_preadv_part(child
, offset
, bytes
, qiov
, 0, flags
);
1767 int coroutine_fn
bdrv_co_preadv_part(BdrvChild
*child
,
1768 int64_t offset
, int64_t bytes
,
1769 QEMUIOVector
*qiov
, size_t qiov_offset
,
1770 BdrvRequestFlags flags
)
1772 BlockDriverState
*bs
= child
->bs
;
1773 BdrvTrackedRequest req
;
1774 BdrvRequestPadding pad
;
1778 trace_bdrv_co_preadv_part(bs
, offset
, bytes
, flags
);
1780 if (!bdrv_co_is_inserted(bs
)) {
1784 ret
= bdrv_check_request32(offset
, bytes
, qiov
, qiov_offset
);
1789 if (bytes
== 0 && !QEMU_IS_ALIGNED(offset
, bs
->bl
.request_alignment
)) {
1791 * Aligning zero request is nonsense. Even if driver has special meaning
1792 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
1793 * it to driver due to request_alignment.
1795 * Still, no reason to return an error if someone do unaligned
1796 * zero-length read occasionally.
1801 bdrv_inc_in_flight(bs
);
1803 /* Don't do copy-on-read if we read data before write operation */
1804 if (qatomic_read(&bs
->copy_on_read
)) {
1805 flags
|= BDRV_REQ_COPY_ON_READ
;
1808 ret
= bdrv_pad_request(bs
, &qiov
, &qiov_offset
, &offset
, &bytes
, false,
1809 &pad
, NULL
, &flags
);
1814 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_READ
);
1815 ret
= bdrv_aligned_preadv(child
, &req
, offset
, bytes
,
1816 bs
->bl
.request_alignment
,
1817 qiov
, qiov_offset
, flags
);
1818 tracked_request_end(&req
);
1819 bdrv_padding_finalize(&pad
);
1822 bdrv_dec_in_flight(bs
);
1827 static int coroutine_fn GRAPH_RDLOCK
1828 bdrv_co_do_pwrite_zeroes(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
1829 BdrvRequestFlags flags
)
1831 BlockDriver
*drv
= bs
->drv
;
1835 bool need_flush
= false;
1839 int64_t max_write_zeroes
= MIN_NON_ZERO(bs
->bl
.max_pwrite_zeroes
,
1841 int alignment
= MAX(bs
->bl
.pwrite_zeroes_alignment
,
1842 bs
->bl
.request_alignment
);
1843 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
, MAX_BOUNCE_BUFFER
);
1845 assert_bdrv_graph_readable();
1846 bdrv_check_request(offset
, bytes
, &error_abort
);
1852 if ((flags
& ~bs
->supported_zero_flags
) & BDRV_REQ_NO_FALLBACK
) {
1856 /* By definition there is no user buffer so this flag doesn't make sense */
1857 if (flags
& BDRV_REQ_REGISTERED_BUF
) {
1861 /* Invalidate the cached block-status data range if this write overlaps */
1862 bdrv_bsc_invalidate_range(bs
, offset
, bytes
);
1864 assert(alignment
% bs
->bl
.request_alignment
== 0);
1865 head
= offset
% alignment
;
1866 tail
= (offset
+ bytes
) % alignment
;
1867 max_write_zeroes
= QEMU_ALIGN_DOWN(max_write_zeroes
, alignment
);
1868 assert(max_write_zeroes
>= bs
->bl
.request_alignment
);
1870 while (bytes
> 0 && !ret
) {
1871 int64_t num
= bytes
;
1873 /* Align request. Block drivers can expect the "bulk" of the request
1874 * to be aligned, and that unaligned requests do not cross cluster
1878 /* Make a small request up to the first aligned sector. For
1879 * convenience, limit this request to max_transfer even if
1880 * we don't need to fall back to writes. */
1881 num
= MIN(MIN(bytes
, max_transfer
), alignment
- head
);
1882 head
= (head
+ num
) % alignment
;
1883 assert(num
< max_write_zeroes
);
1884 } else if (tail
&& num
> alignment
) {
1885 /* Shorten the request to the last aligned sector. */
1889 /* limit request size */
1890 if (num
> max_write_zeroes
) {
1891 num
= max_write_zeroes
;
1895 /* First try the efficient write zeroes operation */
1896 if (drv
->bdrv_co_pwrite_zeroes
) {
1897 ret
= drv
->bdrv_co_pwrite_zeroes(bs
, offset
, num
,
1898 flags
& bs
->supported_zero_flags
);
1899 if (ret
!= -ENOTSUP
&& (flags
& BDRV_REQ_FUA
) &&
1900 !(bs
->supported_zero_flags
& BDRV_REQ_FUA
)) {
1904 assert(!bs
->supported_zero_flags
);
1907 if (ret
== -ENOTSUP
&& !(flags
& BDRV_REQ_NO_FALLBACK
)) {
1908 /* Fall back to bounce buffer if write zeroes is unsupported */
1909 BdrvRequestFlags write_flags
= flags
& ~BDRV_REQ_ZERO_WRITE
;
1911 if ((flags
& BDRV_REQ_FUA
) &&
1912 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1913 /* No need for bdrv_driver_pwrite() to do a fallback
1914 * flush on each chunk; use just one at the end */
1915 write_flags
&= ~BDRV_REQ_FUA
;
1918 num
= MIN(num
, max_transfer
);
1920 buf
= qemu_try_blockalign0(bs
, num
);
1926 qemu_iovec_init_buf(&qiov
, buf
, num
);
1928 ret
= bdrv_driver_pwritev(bs
, offset
, num
, &qiov
, 0, write_flags
);
1930 /* Keep bounce buffer around if it is big enough for all
1931 * all future requests.
1933 if (num
< max_transfer
) {
1944 if (ret
== 0 && need_flush
) {
1945 ret
= bdrv_co_flush(bs
);
1951 static inline int coroutine_fn GRAPH_RDLOCK
1952 bdrv_co_write_req_prepare(BdrvChild
*child
, int64_t offset
, int64_t bytes
,
1953 BdrvTrackedRequest
*req
, int flags
)
1955 BlockDriverState
*bs
= child
->bs
;
1957 bdrv_check_request(offset
, bytes
, &error_abort
);
1959 if (bdrv_is_read_only(bs
)) {
1963 assert(!(bs
->open_flags
& BDRV_O_INACTIVE
));
1964 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1965 assert(!(flags
& ~BDRV_REQ_MASK
));
1966 assert(!((flags
& BDRV_REQ_NO_WAIT
) && !(flags
& BDRV_REQ_SERIALISING
)));
1968 if (flags
& BDRV_REQ_SERIALISING
) {
1969 QEMU_LOCK_GUARD(&bs
->reqs_lock
);
1971 tracked_request_set_serialising(req
, bdrv_get_cluster_size(bs
));
1973 if ((flags
& BDRV_REQ_NO_WAIT
) && bdrv_find_conflicting_request(req
)) {
1977 bdrv_wait_serialising_requests_locked(req
);
1979 bdrv_wait_serialising_requests(req
);
1982 assert(req
->overlap_offset
<= offset
);
1983 assert(offset
+ bytes
<= req
->overlap_offset
+ req
->overlap_bytes
);
1984 assert(offset
+ bytes
<= bs
->total_sectors
* BDRV_SECTOR_SIZE
||
1985 child
->perm
& BLK_PERM_RESIZE
);
1987 switch (req
->type
) {
1988 case BDRV_TRACKED_WRITE
:
1989 case BDRV_TRACKED_DISCARD
:
1990 if (flags
& BDRV_REQ_WRITE_UNCHANGED
) {
1991 assert(child
->perm
& (BLK_PERM_WRITE_UNCHANGED
| BLK_PERM_WRITE
));
1993 assert(child
->perm
& BLK_PERM_WRITE
);
1995 bdrv_write_threshold_check_write(bs
, offset
, bytes
);
1997 case BDRV_TRACKED_TRUNCATE
:
1998 assert(child
->perm
& BLK_PERM_RESIZE
);
2005 static inline void coroutine_fn
2006 bdrv_co_write_req_finish(BdrvChild
*child
, int64_t offset
, int64_t bytes
,
2007 BdrvTrackedRequest
*req
, int ret
)
2009 int64_t end_sector
= DIV_ROUND_UP(offset
+ bytes
, BDRV_SECTOR_SIZE
);
2010 BlockDriverState
*bs
= child
->bs
;
2012 bdrv_check_request(offset
, bytes
, &error_abort
);
2014 qatomic_inc(&bs
->write_gen
);
2017 * Discard cannot extend the image, but in error handling cases, such as
2018 * when reverting a qcow2 cluster allocation, the discarded range can pass
2019 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
2020 * here. Instead, just skip it, since semantically a discard request
2021 * beyond EOF cannot expand the image anyway.
2024 (req
->type
== BDRV_TRACKED_TRUNCATE
||
2025 end_sector
> bs
->total_sectors
) &&
2026 req
->type
!= BDRV_TRACKED_DISCARD
) {
2027 bs
->total_sectors
= end_sector
;
2028 bdrv_parent_cb_resize(bs
);
2029 bdrv_dirty_bitmap_truncate(bs
, end_sector
<< BDRV_SECTOR_BITS
);
2032 switch (req
->type
) {
2033 case BDRV_TRACKED_WRITE
:
2034 stat64_max(&bs
->wr_highest_offset
, offset
+ bytes
);
2035 /* fall through, to set dirty bits */
2036 case BDRV_TRACKED_DISCARD
:
2037 bdrv_set_dirty(bs
, offset
, bytes
);
2046 * Forwards an already correctly aligned write request to the BlockDriver,
2047 * after possibly fragmenting it.
2049 static int coroutine_fn GRAPH_RDLOCK
2050 bdrv_aligned_pwritev(BdrvChild
*child
, BdrvTrackedRequest
*req
,
2051 int64_t offset
, int64_t bytes
, int64_t align
,
2052 QEMUIOVector
*qiov
, size_t qiov_offset
,
2053 BdrvRequestFlags flags
)
2055 BlockDriverState
*bs
= child
->bs
;
2056 BlockDriver
*drv
= bs
->drv
;
2059 int64_t bytes_remaining
= bytes
;
2062 bdrv_check_qiov_request(offset
, bytes
, qiov
, qiov_offset
, &error_abort
);
2068 if (bdrv_has_readonly_bitmaps(bs
)) {
2072 assert(is_power_of_2(align
));
2073 assert((offset
& (align
- 1)) == 0);
2074 assert((bytes
& (align
- 1)) == 0);
2075 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
2078 ret
= bdrv_co_write_req_prepare(child
, offset
, bytes
, req
, flags
);
2080 if (!ret
&& bs
->detect_zeroes
!= BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
&&
2081 !(flags
& BDRV_REQ_ZERO_WRITE
) && drv
->bdrv_co_pwrite_zeroes
&&
2082 qemu_iovec_is_zero(qiov
, qiov_offset
, bytes
)) {
2083 flags
|= BDRV_REQ_ZERO_WRITE
;
2084 if (bs
->detect_zeroes
== BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP
) {
2085 flags
|= BDRV_REQ_MAY_UNMAP
;
2088 /* Can't use optimization hint with bufferless zero write */
2089 flags
&= ~BDRV_REQ_REGISTERED_BUF
;
2093 /* Do nothing, write notifier decided to fail this request */
2094 } else if (flags
& BDRV_REQ_ZERO_WRITE
) {
2095 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_ZERO
);
2096 ret
= bdrv_co_do_pwrite_zeroes(bs
, offset
, bytes
, flags
);
2097 } else if (flags
& BDRV_REQ_WRITE_COMPRESSED
) {
2098 ret
= bdrv_driver_pwritev_compressed(bs
, offset
, bytes
,
2100 } else if (bytes
<= max_transfer
) {
2101 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV
);
2102 ret
= bdrv_driver_pwritev(bs
, offset
, bytes
, qiov
, qiov_offset
, flags
);
2104 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV
);
2105 while (bytes_remaining
) {
2106 int num
= MIN(bytes_remaining
, max_transfer
);
2107 int local_flags
= flags
;
2110 if (num
< bytes_remaining
&& (flags
& BDRV_REQ_FUA
) &&
2111 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
2112 /* If FUA is going to be emulated by flush, we only
2113 * need to flush on the last iteration */
2114 local_flags
&= ~BDRV_REQ_FUA
;
2117 ret
= bdrv_driver_pwritev(bs
, offset
+ bytes
- bytes_remaining
,
2119 qiov_offset
+ bytes
- bytes_remaining
,
2124 bytes_remaining
-= num
;
2127 bdrv_co_debug_event(bs
, BLKDBG_PWRITEV_DONE
);
2132 bdrv_co_write_req_finish(child
, offset
, bytes
, req
, ret
);
2137 static int coroutine_fn GRAPH_RDLOCK
2138 bdrv_co_do_zero_pwritev(BdrvChild
*child
, int64_t offset
, int64_t bytes
,
2139 BdrvRequestFlags flags
, BdrvTrackedRequest
*req
)
2141 BlockDriverState
*bs
= child
->bs
;
2142 QEMUIOVector local_qiov
;
2143 uint64_t align
= bs
->bl
.request_alignment
;
2146 BdrvRequestPadding pad
;
2148 /* This flag doesn't make sense for padding or zero writes */
2149 flags
&= ~BDRV_REQ_REGISTERED_BUF
;
2151 padding
= bdrv_init_padding(bs
, offset
, bytes
, true, &pad
);
2153 assert(!(flags
& BDRV_REQ_NO_WAIT
));
2154 bdrv_make_request_serialising(req
, align
);
2156 bdrv_padding_rmw_read(child
, req
, &pad
, true);
2158 if (pad
.head
|| pad
.merge_reads
) {
2159 int64_t aligned_offset
= offset
& ~(align
- 1);
2160 int64_t write_bytes
= pad
.merge_reads
? pad
.buf_len
: align
;
2162 qemu_iovec_init_buf(&local_qiov
, pad
.buf
, write_bytes
);
2163 ret
= bdrv_aligned_pwritev(child
, req
, aligned_offset
, write_bytes
,
2164 align
, &local_qiov
, 0,
2165 flags
& ~BDRV_REQ_ZERO_WRITE
);
2166 if (ret
< 0 || pad
.merge_reads
) {
2167 /* Error or all work is done */
2170 offset
+= write_bytes
- pad
.head
;
2171 bytes
-= write_bytes
- pad
.head
;
2175 assert(!bytes
|| (offset
& (align
- 1)) == 0);
2176 if (bytes
>= align
) {
2177 /* Write the aligned part in the middle. */
2178 int64_t aligned_bytes
= bytes
& ~(align
- 1);
2179 ret
= bdrv_aligned_pwritev(child
, req
, offset
, aligned_bytes
, align
,
2184 bytes
-= aligned_bytes
;
2185 offset
+= aligned_bytes
;
2188 assert(!bytes
|| (offset
& (align
- 1)) == 0);
2190 assert(align
== pad
.tail
+ bytes
);
2192 qemu_iovec_init_buf(&local_qiov
, pad
.tail_buf
, align
);
2193 ret
= bdrv_aligned_pwritev(child
, req
, offset
, align
, align
,
2195 flags
& ~BDRV_REQ_ZERO_WRITE
);
2199 bdrv_padding_finalize(&pad
);
2205 * Handle a write request in coroutine context
2207 int coroutine_fn
bdrv_co_pwritev(BdrvChild
*child
,
2208 int64_t offset
, int64_t bytes
, QEMUIOVector
*qiov
,
2209 BdrvRequestFlags flags
)
2212 return bdrv_co_pwritev_part(child
, offset
, bytes
, qiov
, 0, flags
);
int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver has
         * a special meaning for zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass such a request to
         * the driver because of request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length write occasionally.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad the request for the following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does the aligning by itself, so we do
         * the alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
                               &pad, &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * Request was unaligned to request_alignment and therefore
         * padded.  We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_finalize(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

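/*
 * Worked example of the padding path above (illustrative only): with
 * request_alignment = 512, a 1000-byte write at offset 100 is widened to the
 * aligned range [0, 1536).  bdrv_pad_request() sets pad.head = 100 and
 * pad.tail = 436, bdrv_padding_rmw_read() reads the partial head and tail
 * sectors into the padding buffers, and the widened request is then written
 * by bdrv_aligned_pwritev() while serialised against overlapping requests.
 */
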
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
    assert_bdrv_graph_readable();

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

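/*
 * Illustrative use (sketch, not taken from this file): a caller that wants a
 * region to read back as zeroes, allowing the driver to unmap it when the
 * node was opened with BDRV_O_UNMAP:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, 0, 1 * MiB, BDRV_REQ_MAY_UNMAP);
 *
 * BDRV_REQ_MAY_UNMAP is silently dropped above when BDRV_O_UNMAP is not set,
 * so the request falls back to writing literal zeroes.
 */
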
/*
 * Flush ALL BDSes regardless of whether they are reachable via a BlkBackend
 * or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request while stopping the VM may break the determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
                     int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    assert_bdrv_graph_readable();
    *pnum = 0;
    total_size = bdrv_co_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_co_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation.  There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance.  Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data.  This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above.  Technically, we do not need to check
             * it here; the worst that can happen is that we fill the cache for
             * non-protocol nodes, and then it is never used.  However, filling
             * the cache requires an RCU update, so double check here to avoid
             * such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when we
             * have accurate information about what is zero and what is data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_bs must be the node
                 * itself.
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to the original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_co_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }

    return ret;
}

int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;

    assert(!include_base || base); /* Can't include NULL base */
    assert_bdrv_graph_readable();

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if they
             * were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as the upper layer may
             * be larger.  We'll add BDRV_BLOCK_EOF if needed at function end,
             * see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may
             * be larger.  We'll add BDRV_BLOCK_EOF if needed at function end,
             * see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, the [offset, offset + *pnum) region is unallocated on this
         * layer, let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}

int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    return bdrv_co_common_block_status_above(bs, base, false, true, offset,
                                             bytes, pnum, map, file, NULL);
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}

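/*
 * Illustrative sketch (not part of the original code): enumerating the
 * extents of a node by repeatedly querying bdrv_block_status() until the end
 * of the image is reached.  The names below are placeholders:
 *
 *     int64_t offset, pnum, map;
 *     BlockDriverState *file;
 *     int64_t end = bdrv_getlength(bs);
 *     int ret;
 *
 *     for (offset = 0; offset < end; offset += pnum) {
 *         ret = bdrv_block_status(bs, offset, end - offset,
 *                                 &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         ... inspect ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO | ...) ...
 *     }
 */
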
/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);
    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}

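/*
 * Illustrative use (sketch): skipping the copy of a region that already reads
 * as zeroes on the destination node:
 *
 *     if (bdrv_co_is_zero_fast(dst_bs, offset, bytes) > 0) {
 *         ... nothing needs to be written for this chunk ...
 *     }
 *
 * A result of 0 only means "not cheaply known to be zero"; callers must not
 * treat it as "contains data".
 */
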
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

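/*
 * Worked example (illustrative): for a chain base <- mid <- top and a range
 * that is written in mid but untouched in top,
 * bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum) returns 2
 * (top is depth 1, mid is depth 2); base itself is not inspected because
 * include_base is false.  A return of 0 means no layer above base allocates
 * the range.
 */
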
int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;

    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_load_vmstate) {
        ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;

    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_save_vmstate) {
        ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}

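/*
 * Note: both helpers above wrap the caller's flat buffer in a single-element
 * QEMUIOVector via QEMU_IOVEC_INIT_BUF() and then defer to the vectored
 * vmstate functions, so drivers only ever see the qiov-based interface.
 */
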
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order. */
    bs->active_flush_req = true;
    qemu_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

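/*
 * Summary of the cascade above: a driver either provides a single
 * bdrv_co_flush callback that writes back all layers at once, or the generic
 * code flushes in two steps (bdrv_co_flush_to_os, then bdrv_co_flush_to_disk
 * or bdrv_aio_flush), skipping the disk flush for cache=unsafe
 * (BDRV_O_NO_FLUSH) and when the write generation has not changed since the
 * last successful flush.  Finally, all writable children are flushed as well.
 */
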
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly. */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster. */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }

        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

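/*
 * Worked example of the fragmentation above (illustrative): with
 * pdiscard_alignment = 64 KiB, request_alignment = 512, offset = 4 KiB and
 * bytes = 200 KiB, head is 4 KiB and tail is 12 KiB, so the loop issues three
 * driver calls: 60 KiB up to the first 64 KiB boundary, 128 KiB of aligned
 * middle, and the final 12 KiB tail.
 */
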
int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
                                     unsigned int *nr_zones,
                                     BlockZoneDescriptor *zones)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                                   int64_t offset, int64_t len)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
                                     QEMUIOVector *qiov,
                                     BdrvRequestFlags flags)
{
    int ret;
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };

    ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

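/*
 * Illustrative use (sketch): allocating a bounce buffer that satisfies the
 * node's memory alignment requirements and releasing it again:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */
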
/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void GRAPH_RDLOCK
bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
                           BdrvChild *final_child)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }

    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}

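/*
 * Note on the registration helpers above: registration walks the whole graph
 * below @bs, and a failure part-way through is rolled back by
 * bdrv_register_buf_rollback(), which unregisters from every child visited
 * before @final_child and then from @bs itself, so the graph never ends up
 * with a partially registered buffer.
 */
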
static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    assert_bdrv_graph_readable();

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    assert_bdrv_graph_readable();

    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}

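/*
 * Illustrative use (sketch): offloading a copy between two children, falling
 * back to a manual read/write when the drivers lack copy-range support:
 *
 *     ret = bdrv_co_copy_range(src_child, src_off, dst_child, dst_off, bytes,
 *                              0, 0);
 *     if (ret == -ENOTSUP) {
 *         ... read into a bounce buffer and bdrv_co_pwritev() it instead ...
 *     }
 */
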
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_co_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but bdrv_refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

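/*
 * Illustrative use (sketch): growing a node to 10 GiB without preallocation,
 * accepting that file protocols may round the size up to their alignment:
 *
 *     ret = bdrv_co_truncate(child, 10 * GiB, false, PREALLOC_MODE_OFF, 0,
 *                            errp);
 *
 * Note that when the image gains a new area that a larger backing file would
 * otherwise shine through, the code above forces BDRV_REQ_ZERO_WRITE on the
 * new area.
 */
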
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;

    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;

    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}