/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "block/coroutines.h"
33 #include "block/write-threshold.h"
34 #include "qemu/cutils.h"
35 #include "qemu/memalign.h"
36 #include "qapi/error.h"
37 #include "qemu/error-report.h"
38 #include "qemu/main-loop.h"
39 #include "sysemu/replay.h"
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
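/*
 * Worked out: assuming the usual BDRV_SECTOR_BITS of 9 (512-byte sectors),
 * this evaluates to 32768 << 9 = 16 MiB.
 */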
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}
void bdrv_parent_drained_end_single(BdrvChild *c)
{
    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}
bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
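/*
 * Merge direction, by example (illustrative): alignments and other minimum
 * requirements combine with MAX() so the merged limits satisfy both sides,
 * while caps such as max_transfer combine with MIN_NON_ZERO() so that a
 * zero ("unlimited") value never masks a real restriction. Merging
 * {max_transfer = 0} with {max_transfer = 1 MiB} therefore yields 1 MiB,
 * and merging {opt_mem_alignment = 512} with {opt_mem_alignment = 4096}
 * yields 4096.
 */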
typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;
static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}
static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};
/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, NULL, true);
}
/*
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
        aio_enable_external(bdrv_get_aio_context(bs));
    }
}
void bdrv_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
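/*
 * Typical use of the drained-section API (a minimal sketch, not code from
 * this file): quiesce a node before manipulating it, then resume:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the node or the graph while no requests are in flight ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is simply this begin/end pair with an empty body,
 * i.e. "wait until all currently in-flight requests have completed".
 */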
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}
unsigned int bdrv_drain_all_count = 0;
static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/*
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
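/*
 * Example (illustrative): a request with overlap_offset = 4096 and
 * overlap_bytes = 4096 covers [4096, 8192). Probing offset = 8192,
 * bytes = 512 fails the first test (8192 >= 8192) and does not overlap,
 * while probing offset = 8000, bytes = 512 passes both tests and does.
 */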
/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}
/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
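/*
 * Worked example (illustrative): for req->offset = 1536, req->bytes = 1024
 * and align = 4096, the serialised window widens to
 * overlap_offset = 1536 & ~4095 = 0 and
 * overlap_bytes = ROUND_UP(2560, 4096) - 0 = 4096, i.e. the whole
 * surrounding 4 KiB block is protected against concurrent overlap.
 */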
/*
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
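/*
 * Example (illustrative): with a 64 KiB cluster size, offset = 70000 and
 * bytes = 1000 round to cluster_offset = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536,
 * i.e. the whole cluster containing the request.
 */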
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */
    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */
    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}
int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}
static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
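/*
 * Usage sketch (illustrative, not from this file): zero out an attached
 * image while letting the driver unmap wherever it can:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */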
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
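/*
 * CoroutineIOCompletion bridges callback-style AIO drivers to coroutine
 * context: the submitting coroutine stores itself in .coroutine, passes
 * bdrv_co_io_em_complete() as the AIO callback and yields; the callback
 * records the result and wakes the coroutine, which then reads co.ret.
 * See the bdrv_aio_preadv/bdrv_aio_pwritev branches below for the pattern
 * in use.
 */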
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset,
                                            BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
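/*
 * Bounce-buffer sizing example (illustrative): with max_transfer = 64 KiB
 * and a 128 KiB cluster still to copy (pnum = 64 KiB), max_we_need is
 * MAX(65536, 131072 - 65536) = 64 KiB and max_allowed is
 * MIN(65536, MAX_BOUNCE_BUFFER) = 64 KiB, so a single 64 KiB buffer is
 * allocated once and reused for both halves of the cluster.
 */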
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |                                   |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
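/*
 * Worked example (illustrative): for align = 4096, offset = 5000 and
 * bytes = 3000, head = 5000 % 4096 = 904 and
 * tail = 4096 - (8000 % 4096) = 192. The padded sum 904 + 3000 + 192
 * equals align, so buf_len = 4096 and merge_reads is true: head and tail
 * can be filled with a single aligned read.
 */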
static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
                                              BdrvTrackedRequest *req,
                                              BdrvRequestPadding *pad,
                                              bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= (offset + num) % alignment;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
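/*
 * Fragmentation example (illustrative): with alignment = 64 KiB,
 * offset = 5000 and bytes = 200000, the loop issues an unaligned head of
 * 60536 bytes up to the first 64 KiB boundary, then an aligned middle of
 * 131072 bytes (further split if it exceeded max_write_zeroes), and
 * finally an unaligned tail of 8392 bytes, so a driver never sees one
 * request whose edges cross an alignment boundary.
 */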
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (ret == 0) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                int64_t bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    /* This flag doesn't make sense for padding or zero writes */
    flags &= ~BDRV_REQ_REGISTERED_BUF;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        int64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_destroy(&pad);

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length write occasionally.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad request for following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
         * alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                               &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * Request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
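/*
 * Usage note (illustrative): callers request unmapping with
 * BDRV_REQ_MAY_UNMAP, but as shown above the flag is honoured only when
 * the image was opened with BDRV_O_UNMAP (e.g. -drive discard=unmap);
 * otherwise the range is explicitly zeroed instead of being discarded.
 */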
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * creating new flush request for stopping
     * the VM may break the determinism
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
2197 * Returns the allocation status of the specified sectors.
2198 * Drivers not implementing the functionality are assumed to not support
2199 * backing files, hence all their sectors are reported as allocated.
2201 * If 'want_zero' is true, the caller is querying for mapping
2202 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2203 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2204 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2206 * If 'offset' is beyond the end of the disk image the return value is
2207 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2209 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2210 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2211 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
/*
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation.  There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         *
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance.  Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data.  This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above.  Technically, we do not need to check
             * it here; the worst that can happen is that we fill the cache for
             * non-protocol nodes, and then it is never used.  However, filling
             * the cache requires an RCU update, so double check here to avoid
             * such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when we
             * have accurate information about what is zero and what is data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_bs must be the node
                 * itself.
                 *
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
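
/*
 * Illustrative sketch (not called from anywhere; offsets are made up):
 * how the block-status cache above behaves for a childless protocol
 * node `bs` backed by a raw file.
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *
 *     // First query: the driver callback runs and, because it reports
 *     // DATA | OFFSET_VALID on a childless node, bdrv_bsc_fill() caches
 *     // the returned data region.
 *     bdrv_co_block_status(bs, true, 0, 1048576, &pnum, &map, &file);
 *
 *     // Second query inside that region: bdrv_bsc_is_data() answers from
 *     // the cache and the (potentially slow) driver callback is skipped.
 *     bdrv_co_block_status(bs, true, 65536, 65536, &pnum, &map, &file);
 *
 * Discards invalidate the overlapping cache range (see
 * bdrv_bsc_invalidate_range() in bdrv_co_pdiscard() below), so the cache
 * can only ever mis-report zeroes as data, which is harmless.
 */
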
static int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;

    assert(!include_base || base); /* Can't include NULL base */

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if they
             * were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
             * larger.  We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
             * larger.  We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, [offset, offset + *pnum) region is unallocated on this layer,
         * let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}
int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    IO_CODE();
    return bdrv_co_common_block_status_above(bs, base, false, true, offset,
                                             bytes, pnum, map, file, NULL);
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}
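
/*
 * Illustrative sketch (hypothetical caller, error handling abbreviated):
 * walking the allocation map of an image with bdrv_block_status(), the
 * way "qemu-img map"-style consumers do.
 *
 *     int64_t offset = 0, len = bdrv_getlength(bs);
 *     while (offset < len) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, len - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         // ret carries BDRV_BLOCK_DATA/ZERO/ALLOCATED/... for the next
 *         // pnum bytes starting at offset.
 *         offset += pnum;
 *     }
 */
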
/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;
    IO_CODE();

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);
    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}
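
/*
 * Sketch of a typical use (hypothetical coroutine caller): skip writing
 * zeroes when the target already reads as zeroes, and fall back to an
 * explicit zero write when the fast test is inconclusive.
 *
 *     ret = bdrv_co_is_zero_fast(bs, offset, bytes);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     if (!ret) {
 *         // May still be zero; the test is fast, not accurate.
 *         ret = bdrv_co_pwrite_zeroes(child, offset, bytes, 0);
 *     }
 */
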
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
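
/*
 * Worked example for the depth value (node names hypothetical): with a
 * chain base <- mid <- top and a range allocated only in mid,
 *
 *     bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum)
 *
 * returns 2 (top is depth 1, mid is depth 2).  The same call for a range
 * allocated in none of the layers above base returns 0.
 */
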
int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_load_vmstate) {
        ret = drv->bdrv_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_save_vmstate) {
        ret = drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}
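
/*
 * Sketch of the intended round trip (hypothetical caller; `pos` values
 * are made up): migration code appends VM state to the image and later
 * reads it back from the same offsets.
 *
 *     uint8_t buf[512];
 *     int n = bdrv_save_vmstate(bs, buf, pos, sizeof(buf));
 *     ...
 *     n = bdrv_load_vmstate(bs, buf, pos, sizeof(buf));
 *
 * Both return the byte count on success and a negative errno on error.
 */
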
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    IO_CODE();
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}
/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    IO_CODE();
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
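
/*
 * Sketch of the difference between the two cancel entry points
 * (hypothetical device-model caller):
 *
 *     bdrv_aio_cancel_async(acb);   // returns immediately; the
 *                                   // completion callback still fires
 *
 * whereas bdrv_aio_cancel(acb) above additionally polls the AioContext
 * until the callback has run and the last reference is dropped, so it
 * must only be used from the main loop.
 */
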
/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
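
/*
 * Sketch of the typical pairing (hypothetical coroutine caller): data is
 * only guaranteed to be on stable storage once a flush succeeds after the
 * write.  With cache=unsafe, the BDRV_O_NO_FLUSH path above makes the
 * flush effectively a no-op, since the children carry the flag too.
 *
 *     ret = bdrv_co_pwritev(child, offset, bytes, &qiov, 0);
 *     if (ret >= 0) {
 *         ret = bdrv_co_flush(child->bs);
 *     }
 */
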
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
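
/*
 * Worked example for the head/tail fragmentation above (hypothetical
 * numbers): with pdiscard_alignment = 65536, request_alignment = 512,
 * offset = 1024 and bytes = 131072, head is 1024, so the first iteration
 * issues a 64512-byte discard up to the 65536 boundary; the second issues
 * a full 65536-byte aligned discard; the remaining 1024-byte tail is
 * still passed down even though it is smaller than pdiscard_alignment,
 * and drivers that cannot handle it just return -ENOTSUP, which is
 * ignored here.
 */
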
int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
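
/*
 * Sketch of typical use (hypothetical caller): bounce buffers handed to
 * O_DIRECT-capable drivers must respect bdrv_opt_mem_align(), so they
 * are obtained and released like this:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */
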
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
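
/*
 * Sketch of the batching pattern (hypothetical caller and helper): the
 * plug/unplug pair brackets a burst of submissions so drivers such as
 * Linux AIO can flush them with a single syscall on unplug.
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         submit_one_request(...);   // hypothetical helper
 *     }
 *     bdrv_io_unplug(bs);            // queued requests are submitted here
 */
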
/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void bdrv_register_buf_rollback(BlockDriverState *bs,
                                       void *host,
                                       size_t size,
                                       BdrvChild *final_child)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }
    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }
    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}
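
/*
 * Sketch of the intended lifecycle (hypothetical caller): memory that
 * will be used for I/O is registered once so drivers can pre-map it,
 * and unregistered with the same host/size pair.
 *
 *     if (bdrv_register_buf(bs, host, size, errp)) {
 *         ...issue I/O into the registered region...
 *         bdrv_unregister_buf(bs, host, size);
 *     }
 *
 * On failure, bdrv_register_buf() has already rolled back any partial
 * registrations via bdrv_register_buf_rollback(), so the caller does not
 * need a cleanup call.
 */
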
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}
/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
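
/*
 * Sketch of typical use (hypothetical coroutine caller): offload the copy
 * between two children and fall back to an explicit read/write pair when
 * the driver combination cannot offload.
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset,
 *                              bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ...bounce the data through a qemu_blockalign() buffer instead...
 *     }
 */
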
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
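
/*
 * Sketch of typical use (hypothetical coroutine caller; the size is made
 * up): grow a node to at least 1073741824 bytes, without requiring an
 * exact final size, using full preallocation.
 *
 *     ret = bdrv_co_truncate(child, 1073741824, false, PREALLOC_MODE_FULL,
 *                            0, errp);
 *
 * The zero-fill logic above matters for PREALLOC_MODE_OFF: when the new
 * area is left unallocated but a larger backing file exists, the added
 * BDRV_REQ_ZERO_WRITE flag keeps stale backing data from showing through.
 */
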
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}
int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}