/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
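/*
 * Note (illustrative, not from the original source): assuming
 * BDRV_SECTOR_BITS == 9, this works out to 32768 * 512 bytes, i.e. 16 MiB.
 */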
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}
void bdrv_parent_drained_end_single(BdrvChild *c)
{
    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}
bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
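/*
 * Note (illustrative, not from the original source): MAX() is used for the
 * alignment fields, where the stricter (larger) requirement of parent and
 * child must win, while MIN_NON_ZERO() is used for the transfer and iov
 * limits, where 0 means "unlimited" and must not clobber a real limit
 * coming from the other side.
 */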
typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};
/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    assume_graph_lock(); /* FIXME */

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    bs->bl.has_variable_length = drv->has_variable_length;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}
void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, NULL, true);
}
/*
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
        aio_enable_external(bdrv_get_aio_context(bs));
    }
}
void bdrv_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, NULL);
}
void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
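/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * need a quiescent node bracket their critical section explicitly:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests can be submitted to bs here ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is simply the degenerate drained section with an empty
 * body, used to wait for all in-flight requests to complete.
 */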
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}
unsigned int bdrv_drain_all_count = 0;
static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}
void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/*
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}
/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
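/*
 * Worked example (illustrative, not from the original source): with
 * req->offset == 1536, req->bytes == 1024 and align == 4096, the request is
 * widened to overlap_offset == 0 and overlap_bytes == 4096, i.e. the whole
 * surrounding 4 KiB block now conflicts with other overlapping requests.
 */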
/*
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}
/**
 * Round a region to cluster boundaries
 */
void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs,
                                         int64_t offset, int64_t bytes,
                                         int64_t *cluster_offset,
                                         int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
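/*
 * Worked example (illustrative, not from the original source): for
 * offset == 5000, bytes == 2000 and a 4096-byte cluster size, the result is
 * *cluster_offset == 4096 and *cluster_bytes == 4096, i.e. the byte range
 * [5000, 7000) is widened to the cluster-aligned range [4096, 8192).
 */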
static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}
void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}
void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}
int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}
static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
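/*
 * Note (assumption, not from the original source): the "32" variant
 * presumably exists for callers whose request sizes must still fit the
 * classic int-sized request paths; BDRV_REQUEST_MAX_BYTES bounds the byte
 * count accordingly.
 */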
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;

    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
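/*
 * Pattern note (sketch, not part of the original file): this pair bridges the
 * callback-based AIO driver interface to coroutines. The submitting coroutine
 * fills in a CoroutineIOCompletion, starts the AIO request with
 * bdrv_co_io_em_complete as its completion callback, yields, and reads co.ret
 * once the callback has woken it up again, e.g.:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();
 *     ret = co.ret;
 */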
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |                                   |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;

    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
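/*
 * Worked example (illustrative, not from the original source): with
 * align == 512, offset == 1000 and bytes == 3000, head == 1000 % 512 == 488
 * and tail == 512 - (4000 % 512) == 96. sum == 488 + 3000 + 96 == 3584 is
 * larger than align and both head and tail are present, so
 * buf_len == 2 * align == 1024 and merge_reads == false: head and tail are
 * read into separate align-sized halves of @buf.
 */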
static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
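/*
 * Worked example (illustrative, not from the original source): with
 * alignment == 4096, offset == 512 and bytes == 12288, and assuming
 * max_transfer is large, the loop above issues three requests: an unaligned
 * head of 3584 bytes up to offset 4096, an aligned middle of 8192 bytes, and
 * an unaligned tail of 512 bytes.
 */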
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
                     int64_t offset, int64_t bytes, int64_t align,
                     QEMUIOVector *qiov, size_t qiov_offset,
                     BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }

        /* Can't use optimization hint with bufferless zero write */
        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
                        BdrvRequestFlags flags, BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    /* This flag doesn't make sense for padding or zero writes */
    flags &= ~BDRV_REQ_REGISTERED_BUF;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        int64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_destroy(&pad);

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length write occasionally.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad request for following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
         * alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                               &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * Request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
    assert_bdrv_graph_readable();

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
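/*
 * Usage sketch (illustrative, not part of the original file): zero the first
 * megabyte of an image, unmapping it when the node was opened with
 * BDRV_O_UNMAP:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, 0, 1024 * 1024,
 *                                 BDRV_REQ_MAY_UNMAP);
 */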
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * creating new flush request for stopping
     * the VM may break the determinism
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
2212 * Returns the allocation status of the specified sectors.
2213 * Drivers not implementing the functionality are assumed to not support
2214 * backing files, hence all their sectors are reported as allocated.
2216 * If 'want_zero' is true, the caller is querying for mapping
2217 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2218 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2219 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2221 * If 'offset' is beyond the end of the disk image the return value is
2222 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2224 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2225 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2226 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2228 * 'pnum' is set to the number of bytes (including and immediately
2229 * following the specified offset) that are easily known to be in the
2230 * same allocated/unallocated state. Note that a second call starting
2231 * at the original offset plus returned pnum may have the same status.
2232 * The returned value is non-zero on success except at end-of-file.
2234 * Returns negative errno on failure. Otherwise, if the
2235 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2236 * set to the host mapping and BDS corresponding to the guest offset.
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
                     int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    assert_bdrv_graph_readable();
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation.  There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance.  Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data.  This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above.  Technically, we do not need to check
             * it here; the worst that can happen is that we fill the cache for
             * non-protocol nodes, and then it is never used.  However, filling
             * the cache requires an RCU update, so double check here to avoid
             * such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when we
             * have accurate information about what is zero and what is data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_bs must be the node
                 * itself.
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;
    IO_CODE();

    assert(!include_base || base); /* Can't include NULL base */
    assert_bdrv_graph_readable();

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if they
             * were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
             * larger.  We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
             * larger.  We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, [offset, offset + *pnum) region is unallocated on this layer,
         * let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}

int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    IO_CODE();
    return bdrv_co_common_block_status_above(bs, base, false, true, offset,
                                             bytes, pnum, map, file, NULL);
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}

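/*
 * Example (illustrative sketch, not part of the upstream file): walking the
 * allocation map of a node with bdrv_block_status().  'bs' is assumed to be
 * a valid, opened BlockDriverState provided by the caller:
 *
 *     int64_t offset = 0, zeroes = 0, len = bdrv_getlength(bs);
 *     while (offset < len) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, len - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         if (ret & BDRV_BLOCK_ZERO) {
 *             zeroes += pnum;
 *         }
 *         offset += pnum;
 *     }
 */
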
/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;
    IO_CODE();

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);
    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}

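/*
 * Example (illustrative sketch): a coroutine_fn caller can use
 * bdrv_co_is_zero_fast() to skip copying a range that already reads as
 * zeroes; because the test is conservative, a result of 0 only means
 * "not known to be zero", so the range may still contain zeroes:
 *
 *     ret = bdrv_co_is_zero_fast(bs, offset, bytes);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     if (!ret) {
 *         (copy the range)
 *     }
 */
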
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

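/*
 * Example (illustrative sketch): checking how much of a range is backed by
 * the top layer only.  A return of 0 with *pnum > 0 means those bytes come
 * from the backing chain (or read as zeroes), not from 'bs' itself:
 *
 *     int64_t pnum;
 *     int ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
 *     if (ret > 0) {
 *         (the first pnum bytes at offset are allocated in bs)
 *     }
 */
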
/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

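/*
 * Example (illustrative sketch): with a chain base <- mid <- top, a return
 * value of 2 from the call below means the first *pnum bytes at 'offset'
 * are allocated in 'mid' (a return of 1 would mean 'top'), while 0 means
 * the range is unallocated in both 'top' and 'mid':
 *
 *     int64_t pnum;
 *     int depth = bdrv_is_allocated_above(top, base, false, offset,
 *                                         bytes, &pnum);
 */
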
int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_load_vmstate) {
        ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_save_vmstate) {
        ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

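/*
 * Example (illustrative sketch): the helpers above present a simple
 * byte-buffer interface for vmstate data; 'buf', 'pos' and 'size' are
 * assumed to be chosen by the caller.  Both calls return 'size' on
 * success and a negative errno on failure:
 *
 *     if (bdrv_save_vmstate(bs, buf, pos, size) < 0) {
 *         (handle the error)
 *     }
 *     if (bdrv_load_vmstate(bs, buf, pos, size) < 0) {
 *         (handle the error)
 *     }
 */
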
/**************************************************************/
/* async block device emulation */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    IO_CODE();
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    IO_CODE();
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

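/*
 * Example (illustrative sketch): a coroutine_fn caller typically flushes
 * after a batch of writes so a failure can be reported before the guest
 * assumes the data is stable.  'child', 'offset', 'bytes' and 'qiov' are
 * assumed to be set up by the caller:
 *
 *     ret = bdrv_co_pwritev(child, offset, bytes, &qiov, 0);
 *     if (ret == 0) {
 *         ret = bdrv_co_flush(child->bs);
 *     }
 */
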
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

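/*
 * Worked example for the fragmentation above (illustrative numbers only):
 * with align = 64 KiB, request_alignment = 512, offset = 4 KiB and
 * bytes = 200 KiB, we get head = 4 KiB and tail = 12 KiB, and the loop
 * issues three driver calls:
 *
 *     offset   4 KiB, num  60 KiB  (up to the first 64 KiB boundary)
 *     offset  64 KiB, num 128 KiB  (whole aligned clusters, tail sliced off)
 *     offset 192 KiB, num  12 KiB  (the unaligned remainder)
 *
 * Drivers that cannot discard the unaligned pieces may return -ENOTSUP,
 * which the loop deliberately tolerates.
 */
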
int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();
    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

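/*
 * Example (illustrative sketch): I/O buffers should honour the node's
 * optimal memory alignment; qemu_try_blockalign() pairs with qemu_vfree().
 * 'bs' and 'len' are assumed to come from the caller:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     (fill buf, issue the I/O)
 *     qemu_vfree(buf);
 */
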
void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_plug) {
            drv->bdrv_co_io_plug(bs);
        }
    }
}

void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();
    assert_bdrv_graph_readable();

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_unplug) {
            drv->bdrv_co_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_unplug(child->bs);
    }
}

/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void GRAPH_RDLOCK
bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
                           BdrvChild *final_child)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }

    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}

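/*
 * Example (illustrative sketch): buffer registration is an optimization
 * hint, so a caller may simply fall back to unregistered I/O when it
 * fails; bdrv_register_buf() has already rolled back on failure, so only
 * a successful registration needs to be undone:
 *
 *     Error *local_err = NULL;
 *     bool registered = bdrv_register_buf(bs, host, size, &local_err);
 *     if (!registered) {
 *         error_free(local_err);
 *     }
 *     (issue I/O on the host buffer)
 *     if (registered) {
 *         bdrv_unregister_buf(bs, host, size);
 *     }
 */
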
static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;
    assert_bdrv_graph_readable();

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();

    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}

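/*
 * Example (illustrative sketch): a coroutine_fn caller can try the
 * offloaded copy first and fall back when the drivers involved do not
 * support it; 'src', 'dst' and the offsets are assumed to be set up by
 * the caller:
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset,
 *                              bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         (fall back to a bounce-buffer read/write pair)
 *     }
 */
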
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but bdrv_refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

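/*
 * Example (illustrative sketch): growing a node without preallocation;
 * exact=false lets file protocols round the size up if they need to.
 * 'child' and 'new_size' are assumed to come from the caller:
 *
 *     Error *local_err = NULL;
 *     ret = bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF,
 *                            0, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */
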
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}